1235783Skib/*
 * Copyright © 2006-2007 Intel Corporation
3235783Skib *
4235783Skib * Permission is hereby granted, free of charge, to any person obtaining a
5235783Skib * copy of this software and associated documentation files (the "Software"),
6235783Skib * to deal in the Software without restriction, including without limitation
7235783Skib * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8235783Skib * and/or sell copies of the Software, and to permit persons to whom the
9235783Skib * Software is furnished to do so, subject to the following conditions:
10235783Skib *
11235783Skib * The above copyright notice and this permission notice (including the next
12235783Skib * paragraph) shall be included in all copies or substantial portions of the
13235783Skib * Software.
14235783Skib *
15235783Skib * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16235783Skib * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17235783Skib * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18235783Skib * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19235783Skib * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20235783Skib * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21235783Skib * DEALINGS IN THE SOFTWARE.
22235783Skib *
23235783Skib * Authors:
24235783Skib *	Eric Anholt <eric@anholt.net>
25235783Skib */
26235783Skib
27235783Skib#include <sys/cdefs.h>
28235783Skib__FBSDID("$FreeBSD$");
29235783Skib
30235783Skib#include <dev/drm2/drmP.h>
31235783Skib#include <dev/drm2/drm.h>
32235783Skib#include <dev/drm2/i915/i915_drm.h>
33235783Skib#include <dev/drm2/i915/i915_drv.h>
34235783Skib#include <dev/drm2/i915/intel_drv.h>
35235783Skib#include <dev/drm2/drm_edid.h>
36235783Skib#include <dev/drm2/drm_dp_helper.h>
37235783Skib#include <dev/drm2/drm_crtc_helper.h>
38235783Skib#include <sys/kdb.h>
39235783Skib#include <sys/limits.h>
40235783Skib
41235783Skib#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
42235783Skib
43235783Skibbool intel_pipe_has_type(struct drm_crtc *crtc, int type);
44235783Skibstatic void intel_update_watermarks(struct drm_device *dev);
45235783Skibstatic void intel_increase_pllclock(struct drm_crtc *crtc);
46235783Skibstatic void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
47235783Skib
/*
 * PLL divider state.  The "given" fields are the raw divider choices
 * iterated by the find_pll routines; the "derived" fields are computed
 * from them by intel_clock()/pineview_clock().
 */
typedef struct {
	/* given values */
	int n;
	int m1, m2;
	int p1, p2;
	/* derived values */
	int	dot;	/* resulting dot (pixel) clock */
	int	vco;	/* VCO frequency */
	int	m;	/* effective M divider derived from m1/m2 */
	int	p;	/* effective P divider (p1 * p2) */
} intel_clock_t;

/* Inclusive [min, max] range for one PLL parameter. */
typedef struct {
	int	min, max;
} intel_range_t;

/*
 * p2 divider selection: targets below dot_limit use p2_slow, others
 * p2_fast.  For LVDS the slow/fast pair instead encodes the
 * single/dual-channel choice (see the find_pll implementations).
 */
typedef struct {
	int	dot_limit;
	int	p2_slow, p2_fast;
} intel_p2_t;

#define INTEL_P2_NUM		      2
typedef struct intel_limit intel_limit_t;
/*
 * Per-platform/per-output PLL parameter limits, plus the search routine
 * used to pick dividers within those limits.
 */
struct intel_limit {
	intel_range_t   dot, vco, n, m, m1, m2, p, p1;
	intel_p2_t	    p2;
	/* Find dividers hitting the target clock; returns true on success. */
	bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
			int, int, intel_clock_t *, intel_clock_t *);
};
77235783Skib
78235783Skib/* FDI */
79235783Skib#define IRONLAKE_FDI_FREQ		2700000 /* in kHz for mode->clock */
80235783Skib
81235783Skibstatic bool
82235783Skibintel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
83235783Skib		    int target, int refclk, intel_clock_t *match_clock,
84235783Skib		    intel_clock_t *best_clock);
85235783Skibstatic bool
86235783Skibintel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
87235783Skib			int target, int refclk, intel_clock_t *match_clock,
88235783Skib			intel_clock_t *best_clock);
89235783Skib
90235783Skibstatic bool
91235783Skibintel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
92235783Skib		      int target, int refclk, intel_clock_t *match_clock,
93235783Skib		      intel_clock_t *best_clock);
94235783Skibstatic bool
95235783Skibintel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
96235783Skib			   int target, int refclk, intel_clock_t *match_clock,
97235783Skib			   intel_clock_t *best_clock);
98235783Skib
99235783Skibstatic inline u32 /* units of 100MHz */
100235783Skibintel_fdi_link_freq(struct drm_device *dev)
101235783Skib{
102235783Skib	if (IS_GEN5(dev)) {
103235783Skib		struct drm_i915_private *dev_priv = dev->dev_private;
104235783Skib		return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
105235783Skib	} else
106235783Skib		return 27;
107235783Skib}
108235783Skib
/* Gen2 (i8xx) limits, DVO outputs. */
static const intel_limit_t intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 930000, .max = 1400000 },
	.n = { .min = 3, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
	.find_pll = intel_find_best_PLL,
};

/* Gen2 (i8xx) limits, LVDS. */
static const intel_limit_t intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 930000, .max = 1400000 },
	.n = { .min = 3, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
	.find_pll = intel_find_best_PLL,
};

/* i9xx limits, SDVO and other non-LVDS outputs. */
static const intel_limit_t intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 10, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_find_best_PLL,
};

/* i9xx limits, LVDS. */
static const intel_limit_t intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 10, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
	.find_pll = intel_find_best_PLL,
};


/* G4x limits, SDVO. */
static const intel_limit_t intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
	.find_pll = intel_g4x_find_best_PLL,
};

/* G4x limits, HDMI and analog (CRT). */
static const intel_limit_t intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_g4x_find_best_PLL,
};

/* G4x limits, single-channel LVDS (p2 fixed at 14). */
static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
	.find_pll = intel_g4x_find_best_PLL,
};

/* G4x limits, dual-channel LVDS (p2 fixed at 7). */
static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
	.find_pll = intel_g4x_find_best_PLL,
};

/* G4x limits, DisplayPort (fixed divider sets; see intel_find_pll_g4x_dp). */
static const intel_limit_t intel_limits_g4x_display_port = {
	.dot = { .min = 161670, .max = 227000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 2 },
	.m = { .min = 97, .max = 108 },
	.m1 = { .min = 0x10, .max = 0x12 },
	.m2 = { .min = 0x05, .max = 0x06 },
	.p = { .min = 10, .max = 20 },
	.p1 = { .min = 1, .max = 2},
	.p2 = { .dot_limit = 0,
		.p2_slow = 10, .p2_fast = 10 },
	.find_pll = intel_find_pll_g4x_dp,
};

/* Pineview limits, SDVO. */
static const intel_limit_t intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_find_best_PLL,
};

/* Pineview limits, LVDS. */
static const intel_limit_t intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_find_best_PLL,
};
269235783Skib
/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
/* Ironlake limits, DAC and other non-LVDS/DP outputs. */
static const intel_limit_t intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_g4x_find_best_PLL,
};

/* Ironlake limits, single-channel LVDS. */
static const intel_limit_t intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_g4x_find_best_PLL,
};

/* Ironlake limits, dual-channel LVDS. */
static const intel_limit_t intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
	.find_pll = intel_g4x_find_best_PLL,
};

/* LVDS 100mhz refclk limits. */
static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_g4x_find_best_PLL,
};

/* Dual-channel LVDS with 100MHz refclk. */
static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
	.find_pll = intel_g4x_find_best_PLL,
};

/* Ironlake limits, DisplayPort/eDP (fixed divider sets). */
static const intel_limit_t intel_limits_ironlake_display_port = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000},
	.n = { .min = 1, .max = 2 },
	.m = { .min = 81, .max = 90 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 10, .max = 20 },
	.p1 = { .min = 1, .max = 2},
	.p2 = { .dot_limit = 0,
		.p2_slow = 10, .p2_fast = 10 },
	.find_pll = intel_find_pll_ironlake_dp,
};
359235783Skib
/*
 * Select the PLL limit table for an Ironlake/Sandybridge (PCH split)
 * CRTC based on the attached output type; for LVDS, also on the refclk
 * and the single/dual-channel state read from PCH_LVDS.
 */
static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
						int refclk)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const intel_limit_t *limit;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP) {
			/* LVDS dual channel */
			if (refclk == 100000)
				limit = &intel_limits_ironlake_dual_lvds_100m;
			else
				limit = &intel_limits_ironlake_dual_lvds;
		} else {
			/* LVDS single channel */
			if (refclk == 100000)
				limit = &intel_limits_ironlake_single_lvds_100m;
			else
				limit = &intel_limits_ironlake_single_lvds;
		}
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
			HAS_eDP)
		limit = &intel_limits_ironlake_display_port;
	else
		limit = &intel_limits_ironlake_dac;

	return limit;
}
389235783Skib
/*
 * Select the PLL limit table for a G4x CRTC based on the attached
 * output type; LVDS additionally checks the LVDS register for the
 * dual-channel state.
 */
static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const intel_limit_t *limit;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			/* LVDS with dual channel */
			limit = &intel_limits_g4x_dual_channel_lvds;
		else
			/* LVDS with single channel */
			limit = &intel_limits_g4x_single_channel_lvds;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
		   intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
		limit = &intel_limits_g4x_hdmi;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
		limit = &intel_limits_g4x_sdvo;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
		limit = &intel_limits_g4x_display_port;
	} else /* The option is for other outputs */
		limit = &intel_limits_i9xx_sdvo;

	return limit;
}
416235783Skib
417235783Skibstatic const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
418235783Skib{
419235783Skib	struct drm_device *dev = crtc->dev;
420235783Skib	const intel_limit_t *limit;
421235783Skib
422235783Skib	if (HAS_PCH_SPLIT(dev))
423235783Skib		limit = intel_ironlake_limit(crtc, refclk);
424235783Skib	else if (IS_G4X(dev)) {
425235783Skib		limit = intel_g4x_limit(crtc);
426235783Skib	} else if (IS_PINEVIEW(dev)) {
427235783Skib		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
428235783Skib			limit = &intel_limits_pineview_lvds;
429235783Skib		else
430235783Skib			limit = &intel_limits_pineview_sdvo;
431235783Skib	} else if (!IS_GEN2(dev)) {
432235783Skib		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
433235783Skib			limit = &intel_limits_i9xx_lvds;
434235783Skib		else
435235783Skib			limit = &intel_limits_i9xx_sdvo;
436235783Skib	} else {
437235783Skib		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
438235783Skib			limit = &intel_limits_i8xx_lvds;
439235783Skib		else
440235783Skib			limit = &intel_limits_i8xx_dvo;
441235783Skib	}
442235783Skib	return limit;
443235783Skib}
444235783Skib
/* m1 is reserved as 0 in Pineview, n is a ring counter */
static void pineview_clock(int refclk, intel_clock_t *clock)
{
	/* Pineview has a single combined m divider, carried in m2. */
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	/* Integer math throughout; vco/dot come out in refclk's units. */
	clock->vco = refclk * clock->m / clock->n;
	clock->dot = clock->vco / clock->p;
}
453235783Skib
/*
 * Compute the derived fields (m, p, vco, dot) of *clock from the given
 * n/m1/m2/p1/p2 dividers.  The register values are biased: the actual
 * divider is register_value + 2 for n/m1/m2 (see the comment above the
 * Ironlake limit tables).
 */
static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
{
	/* Pineview's dividers follow different rules. */
	if (IS_PINEVIEW(dev)) {
		pineview_clock(refclk, clock);
		return;
	}
	clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
	clock->p = clock->p1 * clock->p2;
	clock->vco = refclk * clock->m / (clock->n + 2);
	clock->dot = clock->vco / clock->p;
}
465235783Skib
466235783Skib/**
467235783Skib * Returns whether any output on the specified pipe is of the specified type
468235783Skib */
469235783Skibbool intel_pipe_has_type(struct drm_crtc *crtc, int type)
470235783Skib{
471235783Skib	struct drm_device *dev = crtc->dev;
472235783Skib	struct drm_mode_config *mode_config = &dev->mode_config;
473235783Skib	struct intel_encoder *encoder;
474235783Skib
475235783Skib	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
476235783Skib		if (encoder->base.crtc == crtc && encoder->type == type)
477235783Skib			return true;
478235783Skib
479235783Skib	return false;
480235783Skib}
481235783Skib
/* Reject the candidate divider set; the debug output is compiled out. */
#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */

static bool intel_PLL_is_valid(struct drm_device *dev,
			       const intel_limit_t *limit,
			       const intel_clock_t *clock)
{
	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->p   < limit->p.min   || limit->p.max   < clock->p)
		INTELPllInvalid("p out of range\n");
	if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
		INTELPllInvalid("m1 out of range\n");
	/* m1 must exceed m2, except on Pineview where m1 is unused (0). */
	if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
		INTELPllInvalid("m1 <= m2\n");
	if (clock->m   < limit->m.min   || limit->m.max   < clock->m)
		INTELPllInvalid("m out of range\n");
	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}
516235783Skib
/*
 * Exhaustively search the divider ranges in @limit for the combination
 * whose resulting dot clock is closest to @target.  On success, fills
 * *best_clock and returns true; returns false if no valid combination
 * was found.  @match_clock, when non-NULL, restricts candidates to the
 * same effective p divider.
 */
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
		    int target, int refclk, intel_clock_t *match_clock,
		    intel_clock_t *best_clock)

{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	intel_clock_t clock;
	/* Best error seen so far; seeded with the worst acceptable value. */
	int err = target;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
	    (I915_READ(LVDS)) != 0) {
		/*
		 * For LVDS, if the panel is on, just rely on its current
		 * settings for dual-channel.  We haven't figured out how to
		 * reliably set up different single/dual channel state, if we
		 * even can.
		 */
		if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* m1 is always 0 in Pineview */
			if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					intel_clock(dev, refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	/* Something was found iff we improved on the initial error. */
	return (err != target);
}
583235783Skib
/*
 * Divider search used on G4x and Ironlake.  Unlike intel_find_best_PLL,
 * this accepts any candidate within err_most of @target, prefers small
 * n (searched outermost, and max_n is clamped to the best candidate's n
 * so larger n never wins), and prefers large m1/m2/p1 (searched
 * descending).  Returns true and fills *best_clock on success.
 */
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
			int target, int refclk, intel_clock_t *match_clock,
			intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	intel_clock_t clock;
	int max_n;
	bool found;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);
	found = false;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		int lvds_reg;

		/* PCH-split parts moved the LVDS register. */
		if (HAS_PCH_SPLIT(dev))
			lvds_reg = PCH_LVDS;
		else
			lvds_reg = LVDS;
		if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));
	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					intel_clock(dev, refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}
651235783Skib
652235783Skibstatic bool
653235783Skibintel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
654235783Skib			   int target, int refclk, intel_clock_t *match_clock,
655235783Skib			   intel_clock_t *best_clock)
656235783Skib{
657235783Skib	struct drm_device *dev = crtc->dev;
658235783Skib	intel_clock_t clock;
659235783Skib
660235783Skib	if (target < 200000) {
661235783Skib		clock.n = 1;
662235783Skib		clock.p1 = 2;
663235783Skib		clock.p2 = 10;
664235783Skib		clock.m1 = 12;
665235783Skib		clock.m2 = 9;
666235783Skib	} else {
667235783Skib		clock.n = 2;
668235783Skib		clock.p1 = 1;
669235783Skib		clock.p2 = 10;
670235783Skib		clock.m1 = 14;
671235783Skib		clock.m2 = 8;
672235783Skib	}
673235783Skib	intel_clock(dev, refclk, &clock);
674235783Skib	memcpy(best_clock, &clock, sizeof(intel_clock_t));
675235783Skib	return true;
676235783Skib}
677235783Skib
678235783Skib/* DisplayPort has only two frequencies, 162MHz and 270MHz */
679235783Skibstatic bool
680235783Skibintel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
681235783Skib		      int target, int refclk, intel_clock_t *match_clock,
682235783Skib		      intel_clock_t *best_clock)
683235783Skib{
684235783Skib	intel_clock_t clock;
685235783Skib	if (target < 200000) {
686235783Skib		clock.p1 = 2;
687235783Skib		clock.p2 = 10;
688235783Skib		clock.n = 2;
689235783Skib		clock.m1 = 23;
690235783Skib		clock.m2 = 8;
691235783Skib	} else {
692235783Skib		clock.p1 = 1;
693235783Skib		clock.p2 = 10;
694235783Skib		clock.n = 1;
695235783Skib		clock.m1 = 14;
696235783Skib		clock.m2 = 2;
697235783Skib	}
698235783Skib	clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
699235783Skib	clock.p = (clock.p1 * clock.p2);
700235783Skib	clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
701235783Skib	clock.vco = 0;
702235783Skib	memcpy(best_clock, &clock, sizeof(intel_clock_t));
703235783Skib	return true;
704235783Skib}
705235783Skib
/**
 * intel_wait_for_vblank - wait for vblank on a given pipe
 * @dev: drm device
 * @pipe: pipe to wait for
 *
 * Wait for vblank to occur on a given pipe.  Needed for various bits of
 * mode setting code.
 */
void intel_wait_for_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipestat_reg = PIPESTAT(pipe);

	/* Clear existing vblank status. Note this will clear any other
	 * sticky status fields as well.
	 *
	 * This races with i915_driver_irq_handler() with the result
	 * that either function could miss a vblank event.  Here it is not
	 * fatal, as we will either wait upon the next vblank interrupt or
	 * timeout.  Generally speaking intel_wait_for_vblank() is only
	 * called during modeset at which time the GPU should be idle and
	 * should *not* be performing page flips and thus not waiting on
	 * vblanks...
	 * Currently, the result of us stealing a vblank from the irq
	 * handler is that a single frame will be skipped during swapbuffers.
	 */
	I915_WRITE(pipestat_reg,
		   I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);

	/* Wait for vblank interrupt bit to set */
	/* NOTE(review): 50 and 1 appear to be the timeout and poll interval
	 * for _intel_wait_for — confirm against its definition. */
	if (_intel_wait_for(dev,
	    I915_READ(pipestat_reg) & PIPE_VBLANK_INTERRUPT_STATUS,
	    50, 1, "915vbl"))
		DRM_DEBUG_KMS("vblank wait timed out\n");
}
741235783Skib
742235783Skib/*
743235783Skib * intel_wait_for_pipe_off - wait for pipe to turn off
744235783Skib * @dev: drm device
745235783Skib * @pipe: pipe to wait for
746235783Skib *
747235783Skib * After disabling a pipe, we can't wait for vblank in the usual way,
748235783Skib * spinning on the vblank interrupt status bit, since we won't actually
749235783Skib * see an interrupt when the pipe is disabled.
750235783Skib *
751235783Skib * On Gen4 and above:
752235783Skib *   wait for the pipe register state bit to turn off
753235783Skib *
754235783Skib * Otherwise:
755235783Skib *   wait for the display line value to settle (it usually
756235783Skib *   ends up stopping at the start of the next frame).
757235783Skib *
758235783Skib */
void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Gen4+ exposes a pipe-active status bit we can poll directly. */
	if (INTEL_INFO(dev)->gen >= 4) {
		int reg = PIPECONF(pipe);

		/* Wait for the Pipe State to go off */
		if (_intel_wait_for(dev,
		    (I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0, 100,
		    1, "915pip"))
			DRM_DEBUG_KMS("pipe_off wait timed out\n");
	} else {
		/*
		 * Older gens have no status bit: instead watch the display
		 * scanline counter until it stops changing (or we hit a
		 * 100ms timeout).
		 */
		u32 last_line;
		int reg = PIPEDSL(pipe);
		unsigned long timeout = jiffies + msecs_to_jiffies(100);

		/* Wait for the display line to settle */
		do {
			last_line = I915_READ(reg) & DSL_LINEMASK;
			DELAY(5000);	/* 5ms between samples */
		} while (((I915_READ(reg) & DSL_LINEMASK) != last_line) &&
			 time_after(timeout, jiffies));
		if (time_after(jiffies, timeout))
			DRM_DEBUG_KMS("pipe_off wait timed out\n");
	}
}
786235783Skib
/* Map an on/off boolean to the human-readable name used in assert messages. */
static const char *state_string(bool enabled)
{
	if (enabled)
		return "on";
	return "off";
}
791235783Skib
792235783Skib/* Only for pre-ILK configs */
793235783Skibstatic void assert_pll(struct drm_i915_private *dev_priv,
794235783Skib		       enum pipe pipe, bool state)
795235783Skib{
796235783Skib	int reg;
797235783Skib	u32 val;
798235783Skib	bool cur_state;
799235783Skib
800235783Skib	reg = DPLL(pipe);
801235783Skib	val = I915_READ(reg);
802235783Skib	cur_state = !!(val & DPLL_VCO_ENABLE);
803235783Skib	if (cur_state != state)
804235783Skib		printf("PLL state assertion failure (expected %s, current %s)\n",
805235783Skib		    state_string(state), state_string(cur_state));
806235783Skib}
807235783Skib#define assert_pll_enabled(d, p) assert_pll(d, p, true)
808235783Skib#define assert_pll_disabled(d, p) assert_pll(d, p, false)
809235783Skib
/* For ILK+ */
/*
 * Verify that the PCH DPLL driving @pipe's transcoder is in the expected
 * on/off @state; logs a message on mismatch rather than panicking.
 */
static void assert_pch_pll(struct drm_i915_private *dev_priv,
			   enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		/* On CPT the transcoder-to-PLL routing is programmable. */
		u32 pch_dpll;

		pch_dpll = I915_READ(PCH_DPLL_SEL);

		/* Make sure the selected PLL is enabled to the transcoder */
		KASSERT(((pch_dpll >> (4 * pipe)) & 8) != 0,
		    ("transcoder %d PLL not enabled\n", pipe));

		/* Convert the transcoder pipe number to a pll pipe number */
		pipe = (pch_dpll >> (4 * pipe)) & 1;
	}

	reg = PCH_DPLL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & DPLL_VCO_ENABLE);
	if (cur_state != state)
		printf("PCH PLL state assertion failure (expected %s, current %s)\n",
		    state_string(state), state_string(cur_state));
}
#define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true)
#define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false)
840235783Skib
841235783Skibstatic void assert_fdi_tx(struct drm_i915_private *dev_priv,
842235783Skib			  enum pipe pipe, bool state)
843235783Skib{
844235783Skib	int reg;
845235783Skib	u32 val;
846235783Skib	bool cur_state;
847235783Skib
848235783Skib	reg = FDI_TX_CTL(pipe);
849235783Skib	val = I915_READ(reg);
850235783Skib	cur_state = !!(val & FDI_TX_ENABLE);
851235783Skib	if (cur_state != state)
852235783Skib		printf("FDI TX state assertion failure (expected %s, current %s)\n",
853235783Skib		    state_string(state), state_string(cur_state));
854235783Skib}
855235783Skib#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
856235783Skib#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
857235783Skib
858235783Skibstatic void assert_fdi_rx(struct drm_i915_private *dev_priv,
859235783Skib			  enum pipe pipe, bool state)
860235783Skib{
861235783Skib	int reg;
862235783Skib	u32 val;
863235783Skib	bool cur_state;
864235783Skib
865235783Skib	reg = FDI_RX_CTL(pipe);
866235783Skib	val = I915_READ(reg);
867235783Skib	cur_state = !!(val & FDI_RX_ENABLE);
868235783Skib	if (cur_state != state)
869235783Skib		printf("FDI RX state assertion failure (expected %s, current %s)\n",
870235783Skib		    state_string(state), state_string(cur_state));
871235783Skib}
872235783Skib#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
873235783Skib#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
874235783Skib
875235783Skibstatic void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
876235783Skib				      enum pipe pipe)
877235783Skib{
878235783Skib	int reg;
879235783Skib	u32 val;
880235783Skib
881235783Skib	/* ILK FDI PLL is always enabled */
882235783Skib	if (dev_priv->info->gen == 5)
883235783Skib		return;
884235783Skib
885235783Skib	reg = FDI_TX_CTL(pipe);
886235783Skib	val = I915_READ(reg);
887235783Skib	if (!(val & FDI_TX_PLL_ENABLE))
888235783Skib		printf("FDI TX PLL assertion failure, should be active but is disabled\n");
889235783Skib}
890235783Skib
891235783Skibstatic void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
892235783Skib				      enum pipe pipe)
893235783Skib{
894235783Skib	int reg;
895235783Skib	u32 val;
896235783Skib
897235783Skib	reg = FDI_RX_CTL(pipe);
898235783Skib	val = I915_READ(reg);
899235783Skib	if (!(val & FDI_RX_PLL_ENABLE))
900235783Skib		printf("FDI RX PLL assertion failure, should be active but is disabled\n");
901235783Skib}
902235783Skib
/*
 * Check that the panel power-sequencer registers for the panel attached to
 * @pipe are writable (powered down, or the write-protect key released);
 * logs a message when they are still locked.
 */
static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
				  enum pipe pipe)
{
	int pp_reg, lvds_reg;
	u32 val;
	enum pipe panel_pipe = PIPE_A;
	bool locked = true;

	/* Register offsets differ between PCH-split and older chipsets. */
	if (HAS_PCH_SPLIT(dev_priv->dev)) {
		pp_reg = PCH_PP_CONTROL;
		lvds_reg = PCH_LVDS;
	} else {
		pp_reg = PP_CONTROL;
		lvds_reg = LVDS;
	}

	/* Unlocked when panel power is off, or the unlock key is set. */
	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
		locked = false;

	/* Determine which pipe the LVDS panel is currently wired to. */
	if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
		panel_pipe = PIPE_B;

	/* Only a problem when the locked panel sits on the pipe in question. */
	if (panel_pipe == pipe && locked)
		printf("panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}
931235783Skib
932235783Skibvoid assert_pipe(struct drm_i915_private *dev_priv,
933235783Skib		 enum pipe pipe, bool state)
934235783Skib{
935235783Skib	int reg;
936235783Skib	u32 val;
937235783Skib	bool cur_state;
938235783Skib
939235783Skib	/* if we need the pipe A quirk it must be always on */
940235783Skib	if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
941235783Skib		state = true;
942235783Skib
943235783Skib	reg = PIPECONF(pipe);
944235783Skib	val = I915_READ(reg);
945235783Skib	cur_state = !!(val & PIPECONF_ENABLE);
946235783Skib	if (cur_state != state)
947235783Skib		printf("pipe %c assertion failure (expected %s, current %s)\n",
948235783Skib		    pipe_name(pipe), state_string(state), state_string(cur_state));
949235783Skib}
950235783Skib
951235783Skibstatic void assert_plane(struct drm_i915_private *dev_priv,
952235783Skib			 enum plane plane, bool state)
953235783Skib{
954235783Skib	int reg;
955235783Skib	u32 val;
956235783Skib	bool cur_state;
957235783Skib
958235783Skib	reg = DSPCNTR(plane);
959235783Skib	val = I915_READ(reg);
960235783Skib	cur_state = !!(val & DISPLAY_PLANE_ENABLE);
961235783Skib	if (cur_state != state)
962235783Skib		printf("plane %c assertion failure, (expected %s, current %s)\n",
963235783Skib		       plane_name(plane), state_string(state), state_string(cur_state));
964235783Skib}
965235783Skib
966235783Skib#define assert_plane_enabled(d, p) assert_plane(d, p, true)
967235783Skib#define assert_plane_disabled(d, p) assert_plane(d, p, false)
968235783Skib
969235783Skibstatic void assert_planes_disabled(struct drm_i915_private *dev_priv,
970235783Skib				   enum pipe pipe)
971235783Skib{
972235783Skib	int reg, i;
973235783Skib	u32 val;
974235783Skib	int cur_pipe;
975235783Skib
976235783Skib	/* Planes are fixed to pipes on ILK+ */
977235783Skib	if (HAS_PCH_SPLIT(dev_priv->dev)) {
978235783Skib		reg = DSPCNTR(pipe);
979235783Skib		val = I915_READ(reg);
980235783Skib		if ((val & DISPLAY_PLANE_ENABLE) != 0)
981235783Skib			printf("plane %c assertion failure, should be disabled but not\n",
982235783Skib			       plane_name(pipe));
983235783Skib		return;
984235783Skib	}
985235783Skib
986235783Skib	/* Need to check both planes against the pipe */
987235783Skib	for (i = 0; i < 2; i++) {
988235783Skib		reg = DSPCNTR(i);
989235783Skib		val = I915_READ(reg);
990235783Skib		cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
991235783Skib			DISPPLANE_SEL_PIPE_SHIFT;
992235783Skib		if ((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe)
993235783Skib			printf("plane %c assertion failure, should be off on pipe %c but is still active\n",
994235783Skib		     plane_name(i), pipe_name(pipe));
995235783Skib	}
996235783Skib}
997235783Skib
998235783Skibstatic void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
999235783Skib{
1000235783Skib	u32 val;
1001235783Skib	bool enabled;
1002235783Skib
1003235783Skib	val = I915_READ(PCH_DREF_CONTROL);
1004235783Skib	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
1005235783Skib			    DREF_SUPERSPREAD_SOURCE_MASK));
1006235783Skib	if (!enabled)
1007235783Skib		printf("PCH refclk assertion failure, should be active but is disabled\n");
1008235783Skib}
1009235783Skib
1010235783Skibstatic void assert_transcoder_disabled(struct drm_i915_private *dev_priv,
1011235783Skib				       enum pipe pipe)
1012235783Skib{
1013235783Skib	int reg;
1014235783Skib	u32 val;
1015235783Skib	bool enabled;
1016235783Skib
1017235783Skib	reg = TRANSCONF(pipe);
1018235783Skib	val = I915_READ(reg);
1019235783Skib	enabled = !!(val & TRANS_ENABLE);
1020235783Skib	if (enabled)
1021235783Skib		printf("transcoder assertion failed, should be off on pipe %c but is still active\n",
1022235783Skib	     pipe_name(pipe));
1023235783Skib}
1024235783Skib
1025235783Skibstatic bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
1026235783Skib			      enum pipe pipe, u32 val)
1027235783Skib{
1028235783Skib	if ((val & PORT_ENABLE) == 0)
1029235783Skib		return false;
1030235783Skib
1031235783Skib	if (HAS_PCH_CPT(dev_priv->dev)) {
1032235783Skib		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1033235783Skib			return false;
1034235783Skib	} else {
1035235783Skib		if ((val & TRANSCODER_MASK) != TRANSCODER(pipe))
1036235783Skib			return false;
1037235783Skib	}
1038235783Skib	return true;
1039235783Skib}
1040235783Skib
1041235783Skibstatic bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
1042235783Skib			      enum pipe pipe, u32 val)
1043235783Skib{
1044235783Skib	if ((val & LVDS_PORT_EN) == 0)
1045235783Skib		return false;
1046235783Skib
1047235783Skib	if (HAS_PCH_CPT(dev_priv->dev)) {
1048235783Skib		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1049235783Skib			return false;
1050235783Skib	} else {
1051235783Skib		if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
1052235783Skib			return false;
1053235783Skib	}
1054235783Skib	return true;
1055235783Skib}
1056235783Skib
1057235783Skibstatic bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
1058235783Skib			      enum pipe pipe, u32 val)
1059235783Skib{
1060235783Skib	if ((val & ADPA_DAC_ENABLE) == 0)
1061235783Skib		return false;
1062235783Skib	if (HAS_PCH_CPT(dev_priv->dev)) {
1063235783Skib		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1064235783Skib			return false;
1065235783Skib	} else {
1066235783Skib		if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
1067235783Skib			return false;
1068235783Skib	}
1069235783Skib	return true;
1070235783Skib}
1071235783Skib
1072235783Skibstatic bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
1073235783Skib			    enum pipe pipe, u32 port_sel, u32 val)
1074235783Skib{
1075235783Skib	if ((val & DP_PORT_EN) == 0)
1076235783Skib		return false;
1077235783Skib
1078235783Skib	if (HAS_PCH_CPT(dev_priv->dev)) {
1079235783Skib		u32	trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
1080235783Skib		u32	trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
1081235783Skib		if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
1082235783Skib			return false;
1083235783Skib	} else {
1084235783Skib		if ((val & DP_PIPE_MASK) != (pipe << 30))
1085235783Skib			return false;
1086235783Skib	}
1087235783Skib	return true;
1088235783Skib}
1089235783Skib
1090235783Skibstatic void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
1091235783Skib				   enum pipe pipe, int reg, u32 port_sel)
1092235783Skib{
1093235783Skib	u32 val = I915_READ(reg);
1094235783Skib	if (dp_pipe_enabled(dev_priv, pipe, port_sel, val))
1095235783Skib		printf("PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
1096235783Skib	     reg, pipe_name(pipe));
1097235783Skib}
1098235783Skib
1099235783Skibstatic void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1100235783Skib				     enum pipe pipe, int reg)
1101235783Skib{
1102235783Skib	u32 val = I915_READ(reg);
1103235783Skib	if (hdmi_pipe_enabled(dev_priv, val, pipe))
1104235783Skib		printf("PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
1105235783Skib	     reg, pipe_name(pipe));
1106235783Skib}
1107235783Skib
1108235783Skibstatic void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1109235783Skib				      enum pipe pipe)
1110235783Skib{
1111235783Skib	int reg;
1112235783Skib	u32 val;
1113235783Skib
1114235783Skib	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1115235783Skib	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1116235783Skib	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1117235783Skib
1118235783Skib	reg = PCH_ADPA;
1119235783Skib	val = I915_READ(reg);
1120235783Skib	if (adpa_pipe_enabled(dev_priv, val, pipe))
1121235783Skib		printf("PCH VGA enabled on transcoder %c, should be disabled\n",
1122235783Skib	     pipe_name(pipe));
1123235783Skib
1124235783Skib	reg = PCH_LVDS;
1125235783Skib	val = I915_READ(reg);
1126235783Skib	if (lvds_pipe_enabled(dev_priv, val, pipe))
1127235783Skib		printf("PCH LVDS enabled on transcoder %c, should be disabled\n",
1128235783Skib	     pipe_name(pipe));
1129235783Skib
1130235783Skib	assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB);
1131235783Skib	assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC);
1132235783Skib	assert_pch_hdmi_disabled(dev_priv, pipe, HDMID);
1133235783Skib}
1134235783Skib
1135235783Skib/**
1136235783Skib * intel_enable_pll - enable a PLL
1137235783Skib * @dev_priv: i915 private structure
1138235783Skib * @pipe: pipe PLL to enable
1139235783Skib *
1140235783Skib * Enable @pipe's PLL so we can start pumping pixels from a plane.  Check to
1141235783Skib * make sure the PLL reg is writable first though, since the panel write
1142235783Skib * protect mechanism may be enabled.
1143235783Skib *
1144235783Skib * Note!  This is for pre-ILK only.
1145235783Skib */
static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	int reg;
	u32 val;

	/* No really, not for ILK+ */
	KASSERT(dev_priv->info->gen < 5, ("Wrong device gen"));

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
		assert_panel_unlocked(dev_priv, pipe);

	reg = DPLL(pipe);
	val = I915_READ(reg);
	val |= DPLL_VCO_ENABLE;

	/* We do this three times for luck */
	/*
	 * NOTE(review): the triple write+posting-read+150us delay sequence
	 * is kept exactly as-is; presumably it follows a hardware warmup
	 * requirement — do not collapse into a single write.
	 */
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	DELAY(150); /* wait for warmup */
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	DELAY(150); /* wait for warmup */
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	DELAY(150); /* wait for warmup */
}
1173235783Skib
1174235783Skib/**
1175235783Skib * intel_disable_pll - disable a PLL
1176235783Skib * @dev_priv: i915 private structure
1177235783Skib * @pipe: pipe PLL to disable
1178235783Skib *
1179235783Skib * Disable the PLL for @pipe, making sure the pipe is off first.
1180235783Skib *
1181235783Skib * Note!  This is for pre-ILK only.
1182235783Skib */
1183235783Skibstatic void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1184235783Skib{
1185235783Skib	int reg;
1186235783Skib	u32 val;
1187235783Skib
1188235783Skib	/* Don't disable pipe A or pipe A PLLs if needed */
1189235783Skib	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1190235783Skib		return;
1191235783Skib
1192235783Skib	/* Make sure the pipe isn't still relying on us */
1193235783Skib	assert_pipe_disabled(dev_priv, pipe);
1194235783Skib
1195235783Skib	reg = DPLL(pipe);
1196235783Skib	val = I915_READ(reg);
1197235783Skib	val &= ~DPLL_VCO_ENABLE;
1198235783Skib	I915_WRITE(reg, val);
1199235783Skib	POSTING_READ(reg);
1200235783Skib}
1201235783Skib
1202235783Skib/**
1203235783Skib * intel_enable_pch_pll - enable PCH PLL
1204235783Skib * @dev_priv: i915 private structure
1205235783Skib * @pipe: pipe PLL to enable
1206235783Skib *
1207235783Skib * The PCH PLL needs to be enabled before the PCH transcoder, since it
1208235783Skib * drives the transcoder clock.
1209235783Skib */
1210235783Skibstatic void intel_enable_pch_pll(struct drm_i915_private *dev_priv,
1211235783Skib				 enum pipe pipe)
1212235783Skib{
1213235783Skib	int reg;
1214235783Skib	u32 val;
1215235783Skib
1216235783Skib	if (pipe > 1)
1217235783Skib		return;
1218235783Skib
1219235783Skib	/* PCH only available on ILK+ */
1220235783Skib	KASSERT(dev_priv->info->gen >= 5, ("Wrong device gen"));
1221235783Skib
1222235783Skib	/* PCH refclock must be enabled first */
1223235783Skib	assert_pch_refclk_enabled(dev_priv);
1224235783Skib
1225235783Skib	reg = PCH_DPLL(pipe);
1226235783Skib	val = I915_READ(reg);
1227235783Skib	val |= DPLL_VCO_ENABLE;
1228235783Skib	I915_WRITE(reg, val);
1229235783Skib	POSTING_READ(reg);
1230235783Skib	DELAY(200);
1231235783Skib}
1232235783Skib
static void intel_disable_pch_pll(struct drm_i915_private *dev_priv,
				  enum pipe pipe)
{
	int reg;
	/*
	 * pll_mask/pll_sel describe the third transcoder's PLL routing in
	 * PCH_DPLL_SEL (TRANSC_* bits); see the sharing check below.
	 */
	u32 val, pll_mask = TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL,
		pll_sel = TRANSC_DPLL_ENABLE;

	/* Only pipes 0 and 1 have a dedicated PCH DPLL. */
	if (pipe > 1)
		return;

	/* PCH only available on ILK+ */
	KASSERT(dev_priv->info->gen >= 5, ("Wrong device gen"));

	/* Make sure transcoder isn't still depending on us */
	assert_transcoder_disabled(dev_priv, pipe);

	if (pipe == 0)
		pll_sel |= TRANSC_DPLLA_SEL;
	else if (pipe == 1)
		pll_sel |= TRANSC_DPLLB_SEL;


	/*
	 * If PCH_DPLL_SEL shows the third transcoder still sourcing this
	 * pipe's PLL, leave the PLL running — disabling it would take that
	 * transcoder down too.
	 */
	if ((I915_READ(PCH_DPLL_SEL) & pll_mask) == pll_sel)
		return;

	reg = PCH_DPLL(pipe);
	val = I915_READ(reg);
	val &= ~DPLL_VCO_ENABLE;
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	DELAY(200);
}
1265235783Skib
1266235783Skibstatic void intel_enable_transcoder(struct drm_i915_private *dev_priv,
1267235783Skib				    enum pipe pipe)
1268235783Skib{
1269235783Skib	int reg;
1270235783Skib	u32 val, pipeconf_val;
1271235783Skib	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1272235783Skib
1273235783Skib	/* PCH only available on ILK+ */
1274235783Skib	KASSERT(dev_priv->info->gen >= 5, ("Wrong device gen"));
1275235783Skib
1276235783Skib	/* Make sure PCH DPLL is enabled */
1277235783Skib	assert_pch_pll_enabled(dev_priv, pipe);
1278235783Skib
1279235783Skib	/* FDI must be feeding us bits for PCH ports */
1280235783Skib	assert_fdi_tx_enabled(dev_priv, pipe);
1281235783Skib	assert_fdi_rx_enabled(dev_priv, pipe);
1282235783Skib
1283235783Skib
1284235783Skib	reg = TRANSCONF(pipe);
1285235783Skib	val = I915_READ(reg);
1286235783Skib	pipeconf_val = I915_READ(PIPECONF(pipe));
1287235783Skib
1288235783Skib	if (HAS_PCH_IBX(dev_priv->dev)) {
1289235783Skib		/*
1290235783Skib		 * make the BPC in transcoder be consistent with
1291235783Skib		 * that in pipeconf reg.
1292235783Skib		 */
1293235783Skib		val &= ~PIPE_BPC_MASK;
1294235783Skib		val |= pipeconf_val & PIPE_BPC_MASK;
1295235783Skib	}
1296235783Skib
1297235783Skib	val &= ~TRANS_INTERLACE_MASK;
1298235783Skib	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
1299235783Skib		if (HAS_PCH_IBX(dev_priv->dev) &&
1300235783Skib		    intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO))
1301235783Skib			val |= TRANS_LEGACY_INTERLACED_ILK;
1302235783Skib		else
1303235783Skib			val |= TRANS_INTERLACED;
1304235783Skib	else
1305235783Skib		val |= TRANS_PROGRESSIVE;
1306235783Skib
1307235783Skib	I915_WRITE(reg, val | TRANS_ENABLE);
1308235783Skib	if (_intel_wait_for(dev_priv->dev, I915_READ(reg) & TRANS_STATE_ENABLE,
1309235783Skib	    100, 1, "915trc"))
1310235783Skib		DRM_ERROR("failed to enable transcoder %d\n", pipe);
1311235783Skib}
1312235783Skib
1313235783Skibstatic void intel_disable_transcoder(struct drm_i915_private *dev_priv,
1314235783Skib				     enum pipe pipe)
1315235783Skib{
1316235783Skib	int reg;
1317235783Skib	u32 val;
1318235783Skib
1319235783Skib	/* FDI relies on the transcoder */
1320235783Skib	assert_fdi_tx_disabled(dev_priv, pipe);
1321235783Skib	assert_fdi_rx_disabled(dev_priv, pipe);
1322235783Skib
1323235783Skib	/* Ports must be off as well */
1324235783Skib	assert_pch_ports_disabled(dev_priv, pipe);
1325235783Skib
1326235783Skib	reg = TRANSCONF(pipe);
1327235783Skib	val = I915_READ(reg);
1328235783Skib	val &= ~TRANS_ENABLE;
1329235783Skib	I915_WRITE(reg, val);
1330235783Skib	/* wait for PCH transcoder off, transcoder state */
1331235783Skib	if (_intel_wait_for(dev_priv->dev,
1332235783Skib	    (I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50,
1333235783Skib	    1, "915trd"))
1334235783Skib		DRM_ERROR("failed to disable transcoder %d\n", pipe);
1335235783Skib}
1336235783Skib
1337235783Skib/**
1338235783Skib * intel_enable_pipe - enable a pipe, asserting requirements
1339235783Skib * @dev_priv: i915 private structure
1340235783Skib * @pipe: pipe to enable
1341235783Skib * @pch_port: on ILK+, is this pipe driving a PCH port or not
1342235783Skib *
1343235783Skib * Enable @pipe, making sure that various hardware specific requirements
1344235783Skib * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
1345235783Skib *
1346235783Skib * @pipe should be %PIPE_A or %PIPE_B.
1347235783Skib *
1348235783Skib * Will wait until the pipe is actually running (i.e. first vblank) before
1349235783Skib * returning.
1350235783Skib */
1351235783Skibstatic void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
1352235783Skib			      bool pch_port)
1353235783Skib{
1354235783Skib	int reg;
1355235783Skib	u32 val;
1356235783Skib
1357235783Skib	/*
1358235783Skib	 * A pipe without a PLL won't actually be able to drive bits from
1359235783Skib	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
1360235783Skib	 * need the check.
1361235783Skib	 */
1362235783Skib	if (!HAS_PCH_SPLIT(dev_priv->dev))
1363235783Skib		assert_pll_enabled(dev_priv, pipe);
1364235783Skib	else {
1365235783Skib		if (pch_port) {
1366235783Skib			/* if driving the PCH, we need FDI enabled */
1367235783Skib			assert_fdi_rx_pll_enabled(dev_priv, pipe);
1368235783Skib			assert_fdi_tx_pll_enabled(dev_priv, pipe);
1369235783Skib		}
1370235783Skib		/* FIXME: assert CPU port conditions for SNB+ */
1371235783Skib	}
1372235783Skib
1373235783Skib	reg = PIPECONF(pipe);
1374235783Skib	val = I915_READ(reg);
1375235783Skib	if (val & PIPECONF_ENABLE)
1376235783Skib		return;
1377235783Skib
1378235783Skib	I915_WRITE(reg, val | PIPECONF_ENABLE);
1379235783Skib	intel_wait_for_vblank(dev_priv->dev, pipe);
1380235783Skib}
1381235783Skib
1382235783Skib/**
1383235783Skib * intel_disable_pipe - disable a pipe, asserting requirements
1384235783Skib * @dev_priv: i915 private structure
1385235783Skib * @pipe: pipe to disable
1386235783Skib *
1387235783Skib * Disable @pipe, making sure that various hardware specific requirements
1388235783Skib * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
1389235783Skib *
1390235783Skib * @pipe should be %PIPE_A or %PIPE_B.
1391235783Skib *
1392235783Skib * Will wait until the pipe has shut down before returning.
1393235783Skib */
1394235783Skibstatic void intel_disable_pipe(struct drm_i915_private *dev_priv,
1395235783Skib			       enum pipe pipe)
1396235783Skib{
1397235783Skib	int reg;
1398235783Skib	u32 val;
1399235783Skib
1400235783Skib	/*
1401235783Skib	 * Make sure planes won't keep trying to pump pixels to us,
1402235783Skib	 * or we might hang the display.
1403235783Skib	 */
1404235783Skib	assert_planes_disabled(dev_priv, pipe);
1405235783Skib
1406235783Skib	/* Don't disable pipe A or pipe A PLLs if needed */
1407235783Skib	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1408235783Skib		return;
1409235783Skib
1410235783Skib	reg = PIPECONF(pipe);
1411235783Skib	val = I915_READ(reg);
1412235783Skib	if ((val & PIPECONF_ENABLE) == 0)
1413235783Skib		return;
1414235783Skib
1415235783Skib	I915_WRITE(reg, val & ~PIPECONF_ENABLE);
1416235783Skib	intel_wait_for_pipe_off(dev_priv->dev, pipe);
1417235783Skib}
1418235783Skib
/*
 * Plane regs are double buffered, going from enabled->disabled needs a
 * trigger in order to latch.  The display address reg provides this.
 */
static void intel_flush_display_plane(struct drm_i915_private *dev_priv,
				      enum plane plane)
{
	/* Rewriting the address registers with their current values latches
	 * any pending plane state without changing the scanout address. */
	I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
	I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
}
1429235783Skib
1430235783Skib/**
1431235783Skib * intel_enable_plane - enable a display plane on a given pipe
1432235783Skib * @dev_priv: i915 private structure
1433235783Skib * @plane: plane to enable
1434235783Skib * @pipe: pipe being fed
1435235783Skib *
1436235783Skib * Enable @plane on @pipe, making sure that @pipe is running first.
1437235783Skib */
1438235783Skibstatic void intel_enable_plane(struct drm_i915_private *dev_priv,
1439235783Skib			       enum plane plane, enum pipe pipe)
1440235783Skib{
1441235783Skib	int reg;
1442235783Skib	u32 val;
1443235783Skib
1444235783Skib	/* If the pipe isn't enabled, we can't pump pixels and may hang */
1445235783Skib	assert_pipe_enabled(dev_priv, pipe);
1446235783Skib
1447235783Skib	reg = DSPCNTR(plane);
1448235783Skib	val = I915_READ(reg);
1449235783Skib	if (val & DISPLAY_PLANE_ENABLE)
1450235783Skib		return;
1451235783Skib
1452235783Skib	I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
1453235783Skib	intel_flush_display_plane(dev_priv, plane);
1454235783Skib	intel_wait_for_vblank(dev_priv->dev, pipe);
1455235783Skib}
1456235783Skib
1457235783Skib/**
1458235783Skib * intel_disable_plane - disable a display plane
1459235783Skib * @dev_priv: i915 private structure
1460235783Skib * @plane: plane to disable
1461235783Skib * @pipe: pipe consuming the data
1462235783Skib *
1463235783Skib * Disable @plane; should be an independent operation.
1464235783Skib */
1465235783Skibstatic void intel_disable_plane(struct drm_i915_private *dev_priv,
1466235783Skib				enum plane plane, enum pipe pipe)
1467235783Skib{
1468235783Skib	int reg;
1469235783Skib	u32 val;
1470235783Skib
1471235783Skib	reg = DSPCNTR(plane);
1472235783Skib	val = I915_READ(reg);
1473235783Skib	if ((val & DISPLAY_PLANE_ENABLE) == 0)
1474235783Skib		return;
1475235783Skib
1476235783Skib	I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
1477235783Skib	intel_flush_display_plane(dev_priv, plane);
1478235783Skib	intel_wait_for_vblank(dev_priv->dev, pipe);
1479235783Skib}
1480235783Skib
1481235783Skibstatic void disable_pch_dp(struct drm_i915_private *dev_priv,
1482235783Skib			   enum pipe pipe, int reg, u32 port_sel)
1483235783Skib{
1484235783Skib	u32 val = I915_READ(reg);
1485235783Skib	if (dp_pipe_enabled(dev_priv, pipe, port_sel, val)) {
1486235783Skib		DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe);
1487235783Skib		I915_WRITE(reg, val & ~DP_PORT_EN);
1488235783Skib	}
1489235783Skib}
1490235783Skib
1491235783Skibstatic void disable_pch_hdmi(struct drm_i915_private *dev_priv,
1492235783Skib			     enum pipe pipe, int reg)
1493235783Skib{
1494235783Skib	u32 val = I915_READ(reg);
1495235783Skib	if (hdmi_pipe_enabled(dev_priv, val, pipe)) {
1496235783Skib		DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n",
1497235783Skib			      reg, pipe);
1498235783Skib		I915_WRITE(reg, val & ~PORT_ENABLE);
1499235783Skib	}
1500235783Skib}
1501235783Skib
1502235783Skib/* Disable any ports connected to this transcoder */
1503235783Skibstatic void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
1504235783Skib				    enum pipe pipe)
1505235783Skib{
1506235783Skib	u32 reg, val;
1507235783Skib
1508235783Skib	val = I915_READ(PCH_PP_CONTROL);
1509235783Skib	I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS);
1510235783Skib
1511235783Skib	disable_pch_dp(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1512235783Skib	disable_pch_dp(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1513235783Skib	disable_pch_dp(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1514235783Skib
1515235783Skib	reg = PCH_ADPA;
1516235783Skib	val = I915_READ(reg);
1517235783Skib	if (adpa_pipe_enabled(dev_priv, val, pipe))
1518235783Skib		I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);
1519235783Skib
1520235783Skib	reg = PCH_LVDS;
1521235783Skib	val = I915_READ(reg);
1522235783Skib	if (lvds_pipe_enabled(dev_priv, val, pipe)) {
1523235783Skib		DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val);
1524235783Skib		I915_WRITE(reg, val & ~LVDS_PORT_EN);
1525235783Skib		POSTING_READ(reg);
1526235783Skib		DELAY(100);
1527235783Skib	}
1528235783Skib
1529235783Skib	disable_pch_hdmi(dev_priv, pipe, HDMIB);
1530235783Skib	disable_pch_hdmi(dev_priv, pipe, HDMIC);
1531235783Skib	disable_pch_hdmi(dev_priv, pipe, HDMID);
1532235783Skib}
1533235783Skib
/*
 * Turn off framebuffer compression on 8xx-class hardware and wait for
 * any in-flight compression pass to finish.
 */
static void i8xx_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 fbc_ctl;

	/* Disable compression */
	fbc_ctl = I915_READ(FBC_CONTROL);
	if ((fbc_ctl & FBC_CTL_EN) == 0)
		return;	/* already disabled, nothing to do */

	fbc_ctl &= ~FBC_CTL_EN;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	/* Wait for compressing bit to clear (up to 10ms); on timeout we
	 * just log and bail rather than spin forever. */
	if (_intel_wait_for(dev,
	    (I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10,
	    1, "915fbd")) {
		DRM_DEBUG_KMS("FBC idle timed out\n");
		return;
	}

	DRM_DEBUG_KMS("disabled FBC\n");
}
1557235783Skib
1558235783Skibstatic void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1559235783Skib{
1560235783Skib	struct drm_device *dev = crtc->dev;
1561235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
1562235783Skib	struct drm_framebuffer *fb = crtc->fb;
1563235783Skib	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1564235783Skib	struct drm_i915_gem_object *obj = intel_fb->obj;
1565235783Skib	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1566235783Skib	int cfb_pitch;
1567235783Skib	int plane, i;
1568235783Skib	u32 fbc_ctl, fbc_ctl2;
1569235783Skib
1570235783Skib	cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
1571235783Skib	if (fb->pitches[0] < cfb_pitch)
1572235783Skib		cfb_pitch = fb->pitches[0];
1573235783Skib
1574235783Skib	/* FBC_CTL wants 64B units */
1575235783Skib	cfb_pitch = (cfb_pitch / 64) - 1;
1576235783Skib	plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
1577235783Skib
1578235783Skib	/* Clear old tags */
1579235783Skib	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
1580235783Skib		I915_WRITE(FBC_TAG + (i * 4), 0);
1581235783Skib
1582235783Skib	/* Set it up... */
1583235783Skib	fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
1584235783Skib	fbc_ctl2 |= plane;
1585235783Skib	I915_WRITE(FBC_CONTROL2, fbc_ctl2);
1586235783Skib	I915_WRITE(FBC_FENCE_OFF, crtc->y);
1587235783Skib
1588235783Skib	/* enable it... */
1589235783Skib	fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
1590235783Skib	if (IS_I945GM(dev))
1591235783Skib		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
1592235783Skib	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
1593235783Skib	fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
1594235783Skib	fbc_ctl |= obj->fence_reg;
1595235783Skib	I915_WRITE(FBC_CONTROL, fbc_ctl);
1596235783Skib
1597235783Skib	DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ",
1598235783Skib		      cfb_pitch, crtc->y, intel_crtc->plane);
1599235783Skib}
1600235783Skib
1601235783Skibstatic bool i8xx_fbc_enabled(struct drm_device *dev)
1602235783Skib{
1603235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
1604235783Skib
1605235783Skib	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
1606235783Skib}
1607235783Skib
/*
 * Program and enable framebuffer compression on G4x-class hardware
 * (DPFC engine) for the plane scanned out by @crtc.
 *
 * @crtc:     the CRTC whose framebuffer is to be compressed
 * @interval: recompression timer count programmed into DPFC_RECOMP_CTL
 */
static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
	unsigned long stall_watermark = 200;
	u32 dpfc_ctl;

	/* Plane select, self-refresh enable, 1x compression limit, and
	 * the fence register backing the scanout object. */
	dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
	dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
	I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);

	/* Recompression stall watermark and periodic timer. */
	I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
	I915_WRITE(DPFC_FENCE_YOFF, crtc->y);

	/* enable it... (note: dpfc_ctl is OR-ed over the current register
	 * contents rather than written wholesale) */
	I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);

	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
1634235783Skib
1635235783Skibstatic void g4x_disable_fbc(struct drm_device *dev)
1636235783Skib{
1637235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
1638235783Skib	u32 dpfc_ctl;
1639235783Skib
1640235783Skib	/* Disable compression */
1641235783Skib	dpfc_ctl = I915_READ(DPFC_CONTROL);
1642235783Skib	if (dpfc_ctl & DPFC_CTL_EN) {
1643235783Skib		dpfc_ctl &= ~DPFC_CTL_EN;
1644235783Skib		I915_WRITE(DPFC_CONTROL, dpfc_ctl);
1645235783Skib
1646235783Skib		DRM_DEBUG_KMS("disabled FBC\n");
1647235783Skib	}
1648235783Skib}
1649235783Skib
1650235783Skibstatic bool g4x_fbc_enabled(struct drm_device *dev)
1651235783Skib{
1652235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
1653235783Skib
1654235783Skib	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
1655235783Skib}
1656235783Skib
/*
 * Make the Sandy Bridge blitter notify the FBC hardware of writes, so
 * blits to the scanout invalidate the compressed buffer.
 */
static void sandybridge_blit_fbc_update(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 blt_ecoskpd;

	/* Make sure blitter notifies FBC of writes.
	 *
	 * NOTE(review): the three-step write sequence below appears to use
	 * the upper half of ECOSKPD (bits at GEN6_BLITTER_LOCK_SHIFT) as a
	 * write-enable mask for the lower half — set the mask bit, set the
	 * payload bit, then clear the mask bit again. Confirm against the
	 * register description before reordering anything here. */
	gen6_gt_force_wake_get(dev_priv);
	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
		GEN6_BLITTER_LOCK_SHIFT;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
			 GEN6_BLITTER_LOCK_SHIFT);
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	/* Flush the final write before dropping forcewake. */
	POSTING_READ(GEN6_BLITTER_ECOSKPD);
	gen6_gt_force_wake_put(dev_priv);
}
1676235783Skib
/*
 * Program and enable framebuffer compression on Ironlake+ (ILK DPFC
 * engine) for the plane scanned out by @crtc.
 *
 * @crtc:     the CRTC whose framebuffer is to be compressed
 * @interval: recompression timer count programmed into ILK_DPFC_RECOMP_CTL
 */
static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
	unsigned long stall_watermark = 200;
	u32 dpfc_ctl;

	/* Keep only the reserved bits of the current control value, then
	 * rebuild the configuration from scratch. */
	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	dpfc_ctl &= DPFC_RESERVED;
	dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
	/* Set persistent mode for front-buffer rendering, ala X. */
	dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
	dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
	I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);

	I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
	I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
	/* Tell the hardware where the render target (scanout) lives. */
	I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
	/* enable it... */
	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	/* Gen6 additionally needs the CPU fence programmed and the
	 * blitter hooked up to FBC write notification. */
	if (IS_GEN6(dev)) {
		I915_WRITE(SNB_DPFC_CTL_SA,
			   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
		sandybridge_blit_fbc_update(dev);
	}

	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
1714235783Skib
1715235783Skibstatic void ironlake_disable_fbc(struct drm_device *dev)
1716235783Skib{
1717235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
1718235783Skib	u32 dpfc_ctl;
1719235783Skib
1720235783Skib	/* Disable compression */
1721235783Skib	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
1722235783Skib	if (dpfc_ctl & DPFC_CTL_EN) {
1723235783Skib		dpfc_ctl &= ~DPFC_CTL_EN;
1724235783Skib		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
1725235783Skib
1726235783Skib		DRM_DEBUG_KMS("disabled FBC\n");
1727235783Skib	}
1728235783Skib}
1729235783Skib
1730235783Skibstatic bool ironlake_fbc_enabled(struct drm_device *dev)
1731235783Skib{
1732235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
1733235783Skib
1734235783Skib	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
1735235783Skib}
1736235783Skib
1737235783Skibbool intel_fbc_enabled(struct drm_device *dev)
1738235783Skib{
1739235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
1740235783Skib
1741235783Skib	if (!dev_priv->display.fbc_enabled)
1742235783Skib		return false;
1743235783Skib
1744235783Skib	return dev_priv->display.fbc_enabled(dev);
1745235783Skib}
1746235783Skib
/*
 * Deferred FBC-enable task (scheduled by intel_enable_fbc()).
 *
 * @arg:     the struct intel_fbc_work queued for this enable
 * @pending: taskqueue pending count (unused)
 *
 * Runs under the DRM lock; only acts if this work item is still the
 * current one (dev_priv->fbc_work) and the CRTC still scans out the
 * framebuffer the work was queued for. Frees the work item in all
 * cases — a cancelled work reaches the free() without touching
 * hardware because dev_priv->fbc_work no longer points at it.
 */
static void intel_fbc_work_fn(void *arg, int pending)
{
	struct intel_fbc_work *work = arg;
	struct drm_device *dev = work->crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	DRM_LOCK(dev);
	if (work == dev_priv->fbc_work) {
		/* Double check that we haven't switched fb without cancelling
		 * the prior work.
		 */
		if (work->crtc->fb == work->fb) {
			dev_priv->display.enable_fbc(work->crtc,
						     work->interval);

			/* Record what FBC is now tracking so that
			 * intel_update_fbc() can detect no-op updates. */
			dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
			dev_priv->cfb_fb = work->crtc->fb->base.id;
			dev_priv->cfb_y = work->crtc->y;
		}

		dev_priv->fbc_work = NULL;
	}
	DRM_UNLOCK(dev);

	free(work, DRM_MEM_KMS);
}
1773235783Skib
/*
 * Cancel a pending deferred FBC enable, if any.
 *
 * Caller must hold the lock that protects dev_priv->fbc_work (the DRM
 * lock — see intel_fbc_work_fn()). Safe to call when nothing is queued.
 */
static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
{
	u_int pending;

	if (dev_priv->fbc_work == NULL)
		return;

	DRM_DEBUG_KMS("cancelling pending FBC enable\n");

	/* Synchronisation is provided by struct_mutex and checking of
	 * dev_priv->fbc_work, so we can perform the cancellation
	 * entirely asynchronously.
	 */
	if (taskqueue_cancel_timeout(dev_priv->tq, &dev_priv->fbc_work->task,
	    &pending) == 0)
		/* tasklet was killed before being run, clean up */
		free(dev_priv->fbc_work, DRM_MEM_KMS);
	/* else: the task is already running; it frees itself once it sees
	 * fbc_work has been cleared below. */

	/* Mark the work as no longer wanted so that if it does
	 * wake-up (because the work was already running and waiting
	 * for our mutex), it will discover that is no longer
	 * necessary to run.
	 */
	dev_priv->fbc_work = NULL;
}
1799235783Skib
/*
 * Schedule a deferred FBC enable for @crtc.
 *
 * @crtc:     the CRTC to compress
 * @interval: recompression interval handed to the per-chip enable hook
 *
 * Any previously queued enable is cancelled first; the actual hardware
 * programming happens ~50ms later in intel_fbc_work_fn(). No-op on
 * hardware without an enable_fbc hook.
 */
static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct intel_fbc_work *work;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->display.enable_fbc)
		return;

	intel_cancel_fbc_work(dev_priv);

	/* M_WAITOK: allocation sleeps rather than fails. */
	work = malloc(sizeof(*work), DRM_MEM_KMS, M_WAITOK | M_ZERO);
	work->crtc = crtc;
	work->fb = crtc->fb;
	work->interval = interval;
	TIMEOUT_TASK_INIT(dev_priv->tq, &work->task, 0, intel_fbc_work_fn,
	    work);

	dev_priv->fbc_work = work;

	DRM_DEBUG_KMS("scheduling delayed FBC enable\n");

	/* Delay the actual enabling to let pageflipping cease and the
	 * display to settle before starting the compression. Note that
	 * this delay also serves a second purpose: it allows for a
	 * vblank to pass after disabling the FBC before we attempt
	 * to modify the control registers.
	 *
	 * A more complicated solution would involve tracking vblanks
	 * following the termination of the page-flipping sequence
	 * and indeed performing the enable as a co-routine and not
	 * waiting synchronously upon the vblank.
	 */
	taskqueue_enqueue_timeout(dev_priv->tq, &work->task,
	    msecs_to_jiffies(50));
}
1836235783Skib
1837235783Skibvoid intel_disable_fbc(struct drm_device *dev)
1838235783Skib{
1839235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
1840235783Skib
1841235783Skib	intel_cancel_fbc_work(dev_priv);
1842235783Skib
1843235783Skib	if (!dev_priv->display.disable_fbc)
1844235783Skib		return;
1845235783Skib
1846235783Skib	dev_priv->display.disable_fbc(dev);
1847235783Skib	dev_priv->cfb_plane = -1;
1848235783Skib}
1849235783Skib
/**
 * intel_update_fbc - enable/disable FBC as needed
 * @dev: the drm_device
 *
 * Set up the framebuffer compression hardware at mode set time.  We
 * enable it if possible:
 *   - plane A only (on pre-965)
 *   - no pixel multiply/line duplication
 *   - no alpha buffer discard
 *   - no dual wide
 *   - framebuffer <= 2048 in width, 1536 in height
 *
 * We can't assume that any compression will take place (worst case),
 * so the compressed buffer has to be the same size as the uncompressed
 * one.  It also must reside (along with the line length buffer) in
 * stolen memory.
 *
 * We need to enable/disable FBC on a global basis.
 */
static void intel_update_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = NULL, *tmp_crtc;
	struct intel_crtc *intel_crtc;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	int enable_fbc;

	DRM_DEBUG_KMS("\n");

	if (!i915_powersave)
		return;

	if (!I915_HAS_FBC(dev))
		return;

	/*
	 * If FBC is already on, we just have to verify that we can
	 * keep it that way...
	 * Need to disable if:
	 *   - more than one pipe is active
	 *   - changing FBC params (stride, fence, mode)
	 *   - new fb is too large to fit in compressed buffer
	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
	 */
	/* Find the single enabled CRTC with a framebuffer; FBC can only
	 * track one pipe at a time. */
	list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
		if (tmp_crtc->enabled && tmp_crtc->fb) {
			if (crtc) {
				DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
				dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
				goto out_disable;
			}
			crtc = tmp_crtc;
		}
	}

	if (!crtc || crtc->fb == NULL) {
		DRM_DEBUG_KMS("no output, disabling\n");
		dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
		goto out_disable;
	}

	intel_crtc = to_intel_crtc(crtc);
	fb = crtc->fb;
	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	/* Module parameter: <0 means per-chip default, which here is
	 * "off" for gen <= 6. */
	enable_fbc = i915_enable_fbc;
	if (enable_fbc < 0) {
		DRM_DEBUG_KMS("fbc set to per-chip default\n");
		enable_fbc = 1;
		if (INTEL_INFO(dev)->gen <= 6)
			enable_fbc = 0;
	}
	if (!enable_fbc) {
		DRM_DEBUG_KMS("fbc disabled per module param\n");
		dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
		goto out_disable;
	}
	/* Worst case: no compression at all, so the compressed buffer
	 * (in stolen memory) must hold the whole framebuffer. */
	if (intel_fb->obj->base.size > dev_priv->cfb_size) {
		DRM_DEBUG_KMS("framebuffer too large, disabling "
			      "compression\n");
		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
		goto out_disable;
	}
	if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
	    (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
		DRM_DEBUG_KMS("mode incompatible with compression, "
			      "disabling\n");
		dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
		goto out_disable;
	}
	if ((crtc->mode.hdisplay > 2048) ||
	    (crtc->mode.vdisplay > 1536)) {
		DRM_DEBUG_KMS("mode too large for compression, disabling\n");
		dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
		goto out_disable;
	}
	if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
		DRM_DEBUG_KMS("plane not 0, disabling compression\n");
		dev_priv->no_fbc_reason = FBC_BAD_PLANE;
		goto out_disable;
	}
	/* FBC requires an X-tiled, fenced scanout buffer. */
	if (obj->tiling_mode != I915_TILING_X ||
	    obj->fence_reg == I915_FENCE_REG_NONE) {
		DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
		dev_priv->no_fbc_reason = FBC_NOT_TILED;
		goto out_disable;
	}

	/* If the kernel debugger is active, always disable compression */
	if (kdb_active)
		goto out_disable;

	/* If the scanout has not changed, don't modify the FBC settings.
	 * Note that we make the fundamental assumption that the fb->obj
	 * cannot be unpinned (and have its GTT offset and fence revoked)
	 * without first being decoupled from the scanout and FBC disabled.
	 */
	if (dev_priv->cfb_plane == intel_crtc->plane &&
	    dev_priv->cfb_fb == fb->base.id &&
	    dev_priv->cfb_y == crtc->y)
		return;

	if (intel_fbc_enabled(dev)) {
		/* We update FBC along two paths, after changing fb/crtc
		 * configuration (modeswitching) and after page-flipping
		 * finishes. For the latter, we know that not only did
		 * we disable the FBC at the start of the page-flip
		 * sequence, but also more than one vblank has passed.
		 *
		 * For the former case of modeswitching, it is possible
		 * to switch between two FBC valid configurations
		 * instantaneously so we do need to disable the FBC
		 * before we can modify its control registers. We also
		 * have to wait for the next vblank for that to take
		 * effect. However, since we delay enabling FBC we can
		 * assume that a vblank has passed since disabling and
		 * that we can safely alter the registers in the deferred
		 * callback.
		 *
		 * In the scenario that we go from a valid to invalid
		 * and then back to valid FBC configuration we have
		 * no strict enforcement that a vblank occurred since
		 * disabling the FBC. However, along all current pipe
		 * disabling paths we do need to wait for a vblank at
		 * some point. And we wait before enabling FBC anyway.
		 */
		DRM_DEBUG_KMS("disabling active FBC for update\n");
		intel_disable_fbc(dev);
	}

	intel_enable_fbc(crtc, 500);
	return;

out_disable:
	/* Multiple disables should be harmless */
	if (intel_fbc_enabled(dev)) {
		DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
		intel_disable_fbc(dev);
	}
}
2013235783Skib
/*
 * Pin @obj into the GTT for scan-out, with the alignment the display
 * engine requires, and install a fence for tiled buffers.
 *
 * @dev:       the drm device
 * @obj:       the framebuffer backing object to pin
 * @pipelined: ring to pipeline the fence/flush against (may be NULL)
 *
 * Returns 0 on success or a negative errno. On success the caller owns
 * one pin (and one fence pin for tiled objects); release both with
 * intel_unpin_fb_obj(). Interruptible waits are suppressed for the
 * duration of the call and restored before returning.
 */
int
intel_pin_and_fence_fb_obj(struct drm_device *dev,
			   struct drm_i915_gem_object *obj,
			   struct intel_ring_buffer *pipelined)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 alignment;
	int ret;

	alignment = 0; /* shut gcc */
	switch (obj->tiling_mode) {
	case I915_TILING_NONE:
		/* Untiled scanout alignment varies by generation. */
		if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
			alignment = 128 * 1024;
		else if (INTEL_INFO(dev)->gen >= 4)
			alignment = 4 * 1024;
		else
			alignment = 64 * 1024;
		break;
	case I915_TILING_X:
		/* pin() will align the object as required by fence */
		alignment = 0;
		break;
	case I915_TILING_Y:
		/* FIXME: Is this true? */
		DRM_ERROR("Y tiled not allowed for scan out buffers\n");
		return -EINVAL;
	default:
		KASSERT(0, ("Wrong tiling for fb obj"));
	}

	/* Non-interruptible while we touch the display plane. */
	dev_priv->mm.interruptible = false;
	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
	if (ret)
		goto err_interruptible;

	/* Install a fence for tiled scan-out. Pre-i965 always needs a
	 * fence, whereas 965+ only requires a fence if using
	 * framebuffer compression.  For simplicity, we always install
	 * a fence as the cost is not that onerous.
	 */
	if (obj->tiling_mode != I915_TILING_NONE) {
		ret = i915_gem_object_get_fence(obj, pipelined);
		if (ret)
			goto err_unpin;

		i915_gem_object_pin_fence(obj);
	}

	dev_priv->mm.interruptible = true;
	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_interruptible:
	dev_priv->mm.interruptible = true;
	return ret;
}
2072235783Skib
/*
 * Release the scan-out pin taken by intel_pin_and_fence_fb_obj():
 * drop the fence pin first, then the GTT pin.
 */
void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_fence(obj);
	i915_gem_object_unpin(obj);
}
2078235783Skib
/*
 * Program the display plane registers (pre-Ironlake path) to scan out
 * @fb at panning offset (@x, @y).
 *
 * Returns 0 on success, -EINVAL for an unsupported plane index or
 * pixel depth.
 */
static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
			     int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	int plane = intel_crtc->plane;
	unsigned long Start, Offset;
	u32 dspcntr;
	u32 reg;

	/* This path only handles planes A and B. */
	switch (plane) {
	case 0:
	case 1:
		break;
	default:
		DRM_ERROR("Can't update plane %d in SAREA\n", plane);
		return -EINVAL;
	}

	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	reg = DSPCNTR(plane);
	dspcntr = I915_READ(reg);
	/* Mask out pixel format bits in case we change it */
	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
	switch (fb->bits_per_pixel) {
	case 8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case 16:
		if (fb->depth == 15)
			dspcntr |= DISPPLANE_15_16BPP;
		else
			dspcntr |= DISPPLANE_16BPP;
		break;
	case 24:
	case 32:
		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
		break;
	default:
		DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
		return -EINVAL;
	}
	/* Tiling control exists only on gen4+ in this register. */
	if (INTEL_INFO(dev)->gen >= 4) {
		if (obj->tiling_mode != I915_TILING_NONE)
			dspcntr |= DISPPLANE_TILED;
		else
			dspcntr &= ~DISPPLANE_TILED;
	}

	I915_WRITE(reg, dspcntr);

	/* Base GTT address of the framebuffer and the byte offset of the
	 * (x, y) panning origin within it. */
	Start = obj->gtt_offset;
	Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);

	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
		      Start, Offset, x, y, fb->pitches[0]);
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	if (INTEL_INFO(dev)->gen >= 4) {
		/* gen4+: surface base + tile offset; DSPADDR is the linear
		 * offset. */
		I915_WRITE(DSPSURF(plane), Start);
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPADDR(plane), Offset);
	} else
		I915_WRITE(DSPADDR(plane), Start + Offset);
	POSTING_READ(reg);

	return (0);
}
2151235783Skib
/*
 * Program the display plane registers (Ironlake+ path) to scan out
 * @fb at panning offset (@x, @y).
 *
 * Returns 0 on success, -EINVAL for an unsupported plane index or an
 * unsupported bpp/depth combination.
 */
static int ironlake_update_plane(struct drm_crtc *crtc,
				 struct drm_framebuffer *fb, int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	int plane = intel_crtc->plane;
	unsigned long Start, Offset;
	u32 dspcntr;
	u32 reg;

	/* Ironlake+ supports up to three planes. */
	switch (plane) {
	case 0:
	case 1:
	case 2:
		break;
	default:
		DRM_ERROR("Can't update plane %d in SAREA\n", plane);
		return -EINVAL;
	}

	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	reg = DSPCNTR(plane);
	dspcntr = I915_READ(reg);
	/* Mask out pixel format bits in case we change it */
	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
	switch (fb->bits_per_pixel) {
	case 8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case 16:
		/* Only depth 16 (565) is supported at 16bpp here. */
		if (fb->depth != 16) {
			DRM_ERROR("bpp 16, depth %d\n", fb->depth);
			return -EINVAL;
		}

		dspcntr |= DISPPLANE_16BPP;
		break;
	case 24:
	case 32:
		/* depth 24 -> xRGB8888, depth 30 -> xRGB2101010. */
		if (fb->depth == 24)
			dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
		else if (fb->depth == 30)
			dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
		else {
			DRM_ERROR("bpp %d depth %d\n", fb->bits_per_pixel,
			    fb->depth);
			return -EINVAL;
		}
		break;
	default:
		DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
		return -EINVAL;
	}

	if (obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;
	else
		dspcntr &= ~DISPPLANE_TILED;

	/* must disable */
	dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	I915_WRITE(reg, dspcntr);

	/* Base GTT address of the framebuffer and the byte offset of the
	 * (x, y) panning origin within it. */
	Start = obj->gtt_offset;
	Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);

	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
		      Start, Offset, x, y, fb->pitches[0]);
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	I915_WRITE(DSPSURF(plane), Start);
	I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
	I915_WRITE(DSPADDR(plane), Offset);
	POSTING_READ(reg);

	return 0;
}
2234235783Skib
2235235783Skib/* Assume fb object is pinned & idle & fenced and just update base pointers */
2236235783Skibstatic int
2237235783Skibintel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2238235783Skib			   int x, int y, enum mode_set_atomic state)
2239235783Skib{
2240235783Skib	struct drm_device *dev = crtc->dev;
2241235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
2242235783Skib	int ret;
2243235783Skib
2244235783Skib	ret = dev_priv->display.update_plane(crtc, fb, x, y);
2245235783Skib	if (ret)
2246235783Skib		return ret;
2247235783Skib
2248235783Skib	intel_update_fbc(dev);
2249235783Skib	intel_increase_pllclock(crtc);
2250235783Skib
2251235783Skib	return 0;
2252235783Skib}
2253235783Skib
/*
 * Wait until @old_fb's backing object has no outstanding page-flips
 * and no pending GPU rendering, so it is safe to unpin it.
 *
 * Returns the result of i915_gem_object_finish_gpu() (0 or negative
 * errno); a hung GPU is the only expected failure and is survivable.
 */
static int
intel_finish_fb(struct drm_framebuffer *old_fb)
{
	struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool was_interruptible = dev_priv->mm.interruptible;
	int ret;

	/* Sleep until all flips on this object complete, unless the GPU
	 * is wedged (in which case the flips will never signal). */
	mtx_lock(&dev->event_lock);
	while (!atomic_load_acq_int(&dev_priv->mm.wedged) &&
	    atomic_load_acq_int(&obj->pending_flip) != 0) {
		msleep(&obj->pending_flip, &dev->event_lock,
		    0, "915flp", 0);
	}
	mtx_unlock(&dev->event_lock);

	/* Big Hammer, we also need to ensure that any pending
	 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
	 * current scanout is retired before unpinning the old
	 * framebuffer.
	 *
	 * This should only fail upon a hung GPU, in which case we
	 * can safely continue.
	 */
	dev_priv->mm.interruptible = false;
	ret = i915_gem_object_finish_gpu(obj);
	dev_priv->mm.interruptible = was_interruptible;
	return ret;
}
2284235783Skib
/*
 * intel_pipe_set_base - make crtc->fb the active scanout for this crtc.
 *
 * Pins and fences the new framebuffer's GEM object, waits for any GPU use
 * of the old framebuffer to finish, programs the display base registers,
 * and finally unpins the old framebuffer after a vblank.  Also mirrors the
 * new pan offsets (x, y) into the legacy sarea for old userland.
 *
 * Returns 0 on success or a negative errno (pin/fence or base update
 * failure); on error the new object is left unpinned.
 */
2285235783Skibstatic int
2286235783Skibintel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2287235783Skib		    struct drm_framebuffer *old_fb)
2288235783Skib{
2289235783Skib	struct drm_device *dev = crtc->dev;
2290235783Skib#if 0
2291235783Skib	struct drm_i915_master_private *master_priv;
2292235783Skib#else
	/* FreeBSD drm2 keeps the legacy sarea on dev_priv, not on a master. */
2293235783Skib	drm_i915_private_t *dev_priv = dev->dev_private;
2294235783Skib#endif
2295235783Skib	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2296235783Skib	int ret;
2297235783Skib
2298235783Skib	/* no fb bound */
2299235783Skib	if (!crtc->fb) {
2300235783Skib		DRM_ERROR("No FB bound\n");
2301235783Skib		return 0;
2302235783Skib	}
2303235783Skib
	/* Plane 2 exists only on Ivy Bridge; reject anything else. */
2304235783Skib	switch (intel_crtc->plane) {
2305235783Skib	case 0:
2306235783Skib	case 1:
2307235783Skib		break;
2308235783Skib	case 2:
2309235783Skib		if (IS_IVYBRIDGE(dev))
2310235783Skib			break;
2311235783Skib		/* fall through otherwise */
2312235783Skib	default:
2313235783Skib		DRM_ERROR("no plane for crtc\n");
2314235783Skib		return -EINVAL;
2315235783Skib	}
2316235783Skib
2317235783Skib	DRM_LOCK(dev);
2318235783Skib	ret = intel_pin_and_fence_fb_obj(dev,
2319235783Skib					 to_intel_framebuffer(crtc->fb)->obj,
2320235783Skib					 NULL);
2321235783Skib	if (ret != 0) {
2322235783Skib		DRM_UNLOCK(dev);
2323235783Skib		DRM_ERROR("pin & fence failed\n");
2324235783Skib		return ret;
2325235783Skib	}
2326235783Skib
	/* Drain pending flips / GPU rendering targeting the old scanout. */
2327235783Skib	if (old_fb)
2328235783Skib		intel_finish_fb(old_fb);
2329235783Skib
2330235783Skib	ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
2331235783Skib					 LEAVE_ATOMIC_MODE_SET);
2332235783Skib	if (ret) {
2333235783Skib		intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
2334235783Skib		DRM_UNLOCK(dev);
2335235783Skib		DRM_ERROR("failed to update base address\n");
2336235783Skib		return ret;
2337235783Skib	}
2338235783Skib
	/*
	 * Wait one vblank so the hardware has latched the new base before
	 * the old buffer is unpinned out from under the scanout engine.
	 */
2339235783Skib	if (old_fb) {
2340235783Skib		intel_wait_for_vblank(dev, intel_crtc->pipe);
2341235783Skib		intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
2342235783Skib	}
2343235783Skib
2344235783Skib	DRM_UNLOCK(dev);
2345235783Skib
2346235783Skib#if 0
2347235783Skib	if (!dev->primary->master)
2348235783Skib		return 0;
2349235783Skib
2350235783Skib	master_priv = dev->primary->master->driver_priv;
2351235783Skib	if (!master_priv->sarea_priv)
2352235783Skib		return 0;
2353235783Skib
2354235783Skib	if (intel_crtc->pipe) {
2355235783Skib		master_priv->sarea_priv->pipeB_x = x;
2356235783Skib		master_priv->sarea_priv->pipeB_y = y;
2357235783Skib	} else {
2358235783Skib		master_priv->sarea_priv->pipeA_x = x;
2359235783Skib		master_priv->sarea_priv->pipeA_y = y;
2360235783Skib	}
2361235783Skib#else
2362235783Skib
	/* Publish the pan offsets to legacy (DRI1-era) userland, if any. */
2363235783Skib	if (!dev_priv->sarea_priv)
2364235783Skib		return 0;
2365235783Skib
2366235783Skib	if (intel_crtc->pipe) {
2367235783Skib		dev_priv->sarea_priv->planeB_x = x;
2368235783Skib		dev_priv->sarea_priv->planeB_y = y;
2369235783Skib	} else {
2370235783Skib		dev_priv->sarea_priv->planeA_x = x;
2371235783Skib		dev_priv->sarea_priv->planeA_y = y;
2372235783Skib	}
2373235783Skib#endif
2374235783Skib
2375235783Skib	return 0;
2376235783Skib}
2377235783Skib
/*
 * ironlake_set_pll_edp - select the eDP PLL frequency on DP_A for the
 * given link clock (kHz): 160 MHz for links below 200000 kHz, 270 MHz
 * otherwise.  The 160 MHz case needs the documented four-register
 * workaround sequence spelled out in the comment below.
 */
2378235783Skibstatic void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
2379235783Skib{
2380235783Skib	struct drm_device *dev = crtc->dev;
2381235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
2382235783Skib	u32 dpa_ctl;
2383235783Skib
2384235783Skib	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
2385235783Skib	dpa_ctl = I915_READ(DP_A);
2386235783Skib	dpa_ctl &= ~DP_PLL_FREQ_MASK;
2387235783Skib
2388235783Skib	if (clock < 200000) {
2389235783Skib		u32 temp;
2390235783Skib		dpa_ctl |= DP_PLL_FREQ_160MHZ;
2391235783Skib		/* workaround for 160Mhz:
2392235783Skib		   1) program 0x4600c bits 15:0 = 0x8124
2393235783Skib		   2) program 0x46010 bit 0 = 1
2394235783Skib		   3) program 0x46034 bit 24 = 1
2395235783Skib		   4) program 0x64000 bit 14 = 1
2396235783Skib		   */
2397235783Skib		temp = I915_READ(0x4600c);
2398235783Skib		temp &= 0xffff0000;
2399235783Skib		I915_WRITE(0x4600c, temp | 0x8124);
2400235783Skib
2401235783Skib		temp = I915_READ(0x46010);
2402235783Skib		I915_WRITE(0x46010, temp | 1);
2403235783Skib
2404235783Skib		temp = I915_READ(0x46034);
2405235783Skib		I915_WRITE(0x46034, temp | (1 << 24));
	/* NOTE(review): step 4 of the workaround (0x64000 bit 14, i.e.
	 * DP_A) is applied by the final DP_A write below — confirm against
	 * the PRM that dpa_ctl carries that bit here. */
2406235783Skib	} else {
2407235783Skib		dpa_ctl |= DP_PLL_FREQ_270MHZ;
2408235783Skib	}
2409235783Skib	I915_WRITE(DP_A, dpa_ctl);
2410235783Skib
	/* Flush the write, then give the PLL time to spin up. */
2411235783Skib	POSTING_READ(DP_A);
2412235783Skib	DELAY(500);
2413235783Skib}
2414235783Skib
/*
 * intel_fdi_normal_train - after link training has completed, switch both
 * the CPU FDI TX and the PCH FDI RX from the training patterns to normal
 * pixel operation, with enhanced framing enabled on both ends.  IVB uses
 * a different TX train-select field, and CPT a different RX one.
 */
2415235783Skibstatic void intel_fdi_normal_train(struct drm_crtc *crtc)
2416235783Skib{
2417235783Skib	struct drm_device *dev = crtc->dev;
2418235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
2419235783Skib	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2420235783Skib	int pipe = intel_crtc->pipe;
2421235783Skib	u32 reg, temp;
2422235783Skib
2423235783Skib	/* enable normal train */
2424235783Skib	reg = FDI_TX_CTL(pipe);
2425235783Skib	temp = I915_READ(reg);
2426235783Skib	if (IS_IVYBRIDGE(dev)) {
2427235783Skib		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2428235783Skib		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
2429235783Skib	} else {
2430235783Skib		temp &= ~FDI_LINK_TRAIN_NONE;
2431235783Skib		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
2432235783Skib	}
2433235783Skib	I915_WRITE(reg, temp);
2434235783Skib
2435235783Skib	reg = FDI_RX_CTL(pipe);
2436235783Skib	temp = I915_READ(reg);
2437235783Skib	if (HAS_PCH_CPT(dev)) {
2438235783Skib		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2439235783Skib		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
2440235783Skib	} else {
2441235783Skib		temp &= ~FDI_LINK_TRAIN_NONE;
2442235783Skib		temp |= FDI_LINK_TRAIN_NONE;
2443235783Skib	}
2444235783Skib	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
2445235783Skib
2446235783Skib	/* wait one idle pattern time */
2447235783Skib	POSTING_READ(reg);
2448235783Skib	DELAY(1000);
2449235783Skib
2450235783Skib	/* IVB wants error correction enabled */
2451235783Skib	if (IS_IVYBRIDGE(dev))
2452235783Skib		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
2453235783Skib			   FDI_FE_ERRC_ENABLE);
2454235783Skib}
2455235783Skib
/*
 * cpt_phase_pointer_enable - enable FDI phase sync for a pipe on
 * Cougarpoint PCHs via SOUTH_CHICKEN1.  The override bit must be written
 * first (unlock), then the enable bit in a second write; the posting read
 * flushes the sequence.
 */
2456235783Skibstatic void cpt_phase_pointer_enable(struct drm_device *dev, int pipe)
2457235783Skib{
2458235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
2459235783Skib	u32 flags = I915_READ(SOUTH_CHICKEN1);
2460235783Skib
2461235783Skib	flags |= FDI_PHASE_SYNC_OVR(pipe);
2462235783Skib	I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */
2463235783Skib	flags |= FDI_PHASE_SYNC_EN(pipe);
2464235783Skib	I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */
2465235783Skib	POSTING_READ(SOUTH_CHICKEN1);
2466235783Skib}
2467235783Skib
2468235783Skib/* The FDI link training functions for ILK/Ibexpeak. */
/*
 * Two-phase training: put both TX and RX into pattern 1, poll FDI_RX_IIR
 * for bit lock, then switch to pattern 2 and poll for symbol lock.  Each
 * poll retries up to 5 times; lock bits are write-one-to-clear.  Failure
 * is logged but not returned — a hung link shows up as a blank output.
 */
2469235783Skibstatic void ironlake_fdi_link_train(struct drm_crtc *crtc)
2470235783Skib{
2471235783Skib	struct drm_device *dev = crtc->dev;
2472235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
2473235783Skib	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2474235783Skib	int pipe = intel_crtc->pipe;
2475235783Skib	int plane = intel_crtc->plane;
2476235783Skib	u32 reg, temp, tries;
2477235783Skib
2478235783Skib	/* FDI needs bits from pipe & plane first */
2479235783Skib	assert_pipe_enabled(dev_priv, pipe);
2480235783Skib	assert_plane_enabled(dev_priv, plane);
2481235783Skib
2482235783Skib	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
2483235783Skib	   for train result */
2484235783Skib	reg = FDI_RX_IMR(pipe);
2485235783Skib	temp = I915_READ(reg);
2486235783Skib	temp &= ~FDI_RX_SYMBOL_LOCK;
2487235783Skib	temp &= ~FDI_RX_BIT_LOCK;
2488235783Skib	I915_WRITE(reg, temp);
2489235783Skib	I915_READ(reg);
2490235783Skib	DELAY(150);
2491235783Skib
2492235783Skib	/* enable CPU FDI TX and PCH FDI RX */
2493235783Skib	reg = FDI_TX_CTL(pipe);
2494235783Skib	temp = I915_READ(reg);
	/* Lane count field occupies bits 21:19 (value = lanes - 1). */
2495235783Skib	temp &= ~(7 << 19);
2496235783Skib	temp |= (intel_crtc->fdi_lanes - 1) << 19;
2497235783Skib	temp &= ~FDI_LINK_TRAIN_NONE;
2498235783Skib	temp |= FDI_LINK_TRAIN_PATTERN_1;
2499235783Skib	I915_WRITE(reg, temp | FDI_TX_ENABLE);
2500235783Skib
2501235783Skib	reg = FDI_RX_CTL(pipe);
2502235783Skib	temp = I915_READ(reg);
2503235783Skib	temp &= ~FDI_LINK_TRAIN_NONE;
2504235783Skib	temp |= FDI_LINK_TRAIN_PATTERN_1;
2505235783Skib	I915_WRITE(reg, temp | FDI_RX_ENABLE);
2506235783Skib
2507235783Skib	POSTING_READ(reg);
2508235783Skib	DELAY(150);
2509235783Skib
2510235783Skib	/* Ironlake workaround, enable clock pointer after FDI enable*/
2511235783Skib	if (HAS_PCH_IBX(dev)) {
2512235783Skib		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2513235783Skib		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
2514235783Skib			   FDI_RX_PHASE_SYNC_POINTER_EN);
2515235783Skib	}
2516235783Skib
2517235783Skib	reg = FDI_RX_IIR(pipe);
2518235783Skib	for (tries = 0; tries < 5; tries++) {
2519235783Skib		temp = I915_READ(reg);
2520235783Skib		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2521235783Skib
2522235783Skib		if ((temp & FDI_RX_BIT_LOCK)) {
2523235783Skib			DRM_DEBUG_KMS("FDI train 1 done.\n");
	/* Write the lock bit back to acknowledge/clear it. */
2524235783Skib			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2525235783Skib			break;
2526235783Skib		}
2527235783Skib	}
2528235783Skib	if (tries == 5)
2529235783Skib		DRM_ERROR("FDI train 1 fail!\n");
2530235783Skib
2531235783Skib	/* Train 2 */
2532235783Skib	reg = FDI_TX_CTL(pipe);
2533235783Skib	temp = I915_READ(reg);
2534235783Skib	temp &= ~FDI_LINK_TRAIN_NONE;
2535235783Skib	temp |= FDI_LINK_TRAIN_PATTERN_2;
2536235783Skib	I915_WRITE(reg, temp);
2537235783Skib
2538235783Skib	reg = FDI_RX_CTL(pipe);
2539235783Skib	temp = I915_READ(reg);
2540235783Skib	temp &= ~FDI_LINK_TRAIN_NONE;
2541235783Skib	temp |= FDI_LINK_TRAIN_PATTERN_2;
2542235783Skib	I915_WRITE(reg, temp);
2543235783Skib
2544235783Skib	POSTING_READ(reg);
2545235783Skib	DELAY(150);
2546235783Skib
2547235783Skib	reg = FDI_RX_IIR(pipe);
2548235783Skib	for (tries = 0; tries < 5; tries++) {
2549235783Skib		temp = I915_READ(reg);
2550235783Skib		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2551235783Skib
2552235783Skib		if (temp & FDI_RX_SYMBOL_LOCK) {
2553235783Skib			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2554235783Skib			DRM_DEBUG_KMS("FDI train 2 done.\n");
2555235783Skib			break;
2556235783Skib		}
2557235783Skib	}
2558235783Skib	if (tries == 5)
2559235783Skib		DRM_ERROR("FDI train 2 fail!\n");
2560235783Skib
2561235783Skib	DRM_DEBUG_KMS("FDI train done\n");
2562235783Skib
2563235783Skib}
2564235783Skib
/*
 * Voltage-swing / pre-emphasis settings tried in order by the SNB and IVB
 * training loops below (one attempt per table entry, four total).
 */
2565235783Skibstatic const int snb_b_fdi_train_param[] = {
2566235783Skib	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
2567235783Skib	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
2568235783Skib	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
2569235783Skib	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
2570235783Skib};
2571235783Skib
2572235783Skib/* The FDI link training functions for SNB/Cougarpoint. */
/*
 * Like the ILK variant, but each training phase retries with successively
 * stronger voltage-swing/pre-emphasis values from snb_b_fdi_train_param[]
 * until the corresponding lock bit appears in FDI_RX_IIR.  CPT PCHs use
 * their own RX train-pattern field.
 */
2573235783Skibstatic void gen6_fdi_link_train(struct drm_crtc *crtc)
2574235783Skib{
2575235783Skib	struct drm_device *dev = crtc->dev;
2576235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
2577235783Skib	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2578235783Skib	int pipe = intel_crtc->pipe;
2579235783Skib	u32 reg, temp, i;
2580235783Skib
2581235783Skib	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
2582235783Skib	   for train result */
2583235783Skib	reg = FDI_RX_IMR(pipe);
2584235783Skib	temp = I915_READ(reg);
2585235783Skib	temp &= ~FDI_RX_SYMBOL_LOCK;
2586235783Skib	temp &= ~FDI_RX_BIT_LOCK;
2587235783Skib	I915_WRITE(reg, temp);
2588235783Skib
2589235783Skib	POSTING_READ(reg);
2590235783Skib	DELAY(150);
2591235783Skib
2592235783Skib	/* enable CPU FDI TX and PCH FDI RX */
2593235783Skib	reg = FDI_TX_CTL(pipe);
2594235783Skib	temp = I915_READ(reg);
	/* Lane count field: bits 21:19, value = lanes - 1. */
2595235783Skib	temp &= ~(7 << 19);
2596235783Skib	temp |= (intel_crtc->fdi_lanes - 1) << 19;
2597235783Skib	temp &= ~FDI_LINK_TRAIN_NONE;
2598235783Skib	temp |= FDI_LINK_TRAIN_PATTERN_1;
2599235783Skib	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2600235783Skib	/* SNB-B */
2601235783Skib	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2602235783Skib	I915_WRITE(reg, temp | FDI_TX_ENABLE);
2603235783Skib
2604235783Skib	reg = FDI_RX_CTL(pipe);
2605235783Skib	temp = I915_READ(reg);
2606235783Skib	if (HAS_PCH_CPT(dev)) {
2607235783Skib		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2608235783Skib		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2609235783Skib	} else {
2610235783Skib		temp &= ~FDI_LINK_TRAIN_NONE;
2611235783Skib		temp |= FDI_LINK_TRAIN_PATTERN_1;
2612235783Skib	}
2613235783Skib	I915_WRITE(reg, temp | FDI_RX_ENABLE);
2614235783Skib
2615235783Skib	POSTING_READ(reg);
2616235783Skib	DELAY(150);
2617235783Skib
2618235783Skib	if (HAS_PCH_CPT(dev))
2619235783Skib		cpt_phase_pointer_enable(dev, pipe);
2620235783Skib
	/* Phase 1: step through the vswing table until bit lock is seen. */
2621235783Skib	for (i = 0; i < 4; i++) {
2622235783Skib		reg = FDI_TX_CTL(pipe);
2623235783Skib		temp = I915_READ(reg);
2624235783Skib		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2625235783Skib		temp |= snb_b_fdi_train_param[i];
2626235783Skib		I915_WRITE(reg, temp);
2627235783Skib
2628235783Skib		POSTING_READ(reg);
2629235783Skib		DELAY(500);
2630235783Skib
2631235783Skib		reg = FDI_RX_IIR(pipe);
2632235783Skib		temp = I915_READ(reg);
2633235783Skib		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2634235783Skib
2635235783Skib		if (temp & FDI_RX_BIT_LOCK) {
2636235783Skib			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2637235783Skib			DRM_DEBUG_KMS("FDI train 1 done.\n");
2638235783Skib			break;
2639235783Skib		}
2640235783Skib	}
2641235783Skib	if (i == 4)
2642235783Skib		DRM_ERROR("FDI train 1 fail!\n");
2643235783Skib
2644235783Skib	/* Train 2 */
2645235783Skib	reg = FDI_TX_CTL(pipe);
2646235783Skib	temp = I915_READ(reg);
2647235783Skib	temp &= ~FDI_LINK_TRAIN_NONE;
2648235783Skib	temp |= FDI_LINK_TRAIN_PATTERN_2;
2649235783Skib	if (IS_GEN6(dev)) {
2650235783Skib		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2651235783Skib		/* SNB-B */
2652235783Skib		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2653235783Skib	}
2654235783Skib	I915_WRITE(reg, temp);
2655235783Skib
2656235783Skib	reg = FDI_RX_CTL(pipe);
2657235783Skib	temp = I915_READ(reg);
2658235783Skib	if (HAS_PCH_CPT(dev)) {
2659235783Skib		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2660235783Skib		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
2661235783Skib	} else {
2662235783Skib		temp &= ~FDI_LINK_TRAIN_NONE;
2663235783Skib		temp |= FDI_LINK_TRAIN_PATTERN_2;
2664235783Skib	}
2665235783Skib	I915_WRITE(reg, temp);
2666235783Skib
2667235783Skib	POSTING_READ(reg);
2668235783Skib	DELAY(150);
2669235783Skib
	/* Phase 2: same retry strategy, waiting for symbol lock. */
2670235783Skib	for (i = 0; i < 4; i++) {
2671235783Skib		reg = FDI_TX_CTL(pipe);
2672235783Skib		temp = I915_READ(reg);
2673235783Skib		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2674235783Skib		temp |= snb_b_fdi_train_param[i];
2675235783Skib		I915_WRITE(reg, temp);
2676235783Skib
2677235783Skib		POSTING_READ(reg);
2678235783Skib		DELAY(500);
2679235783Skib
2680235783Skib		reg = FDI_RX_IIR(pipe);
2681235783Skib		temp = I915_READ(reg);
2682235783Skib		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2683235783Skib
2684235783Skib		if (temp & FDI_RX_SYMBOL_LOCK) {
2685235783Skib			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2686235783Skib			DRM_DEBUG_KMS("FDI train 2 done.\n");
2687235783Skib			break;
2688235783Skib		}
2689235783Skib	}
2690235783Skib	if (i == 4)
2691235783Skib		DRM_ERROR("FDI train 2 fail!\n");
2692235783Skib
2693235783Skib	DRM_DEBUG_KMS("FDI train done.\n");
2694235783Skib}
2695235783Skib
2696235783Skib/* Manual link training for Ivy Bridge A0 parts */
/*
 * IVB A0 cannot use hardware auto-training, so this drives the same
 * two-phase pattern-1/pattern-2 sequence by hand using the IVB-specific
 * TX fields and CPT RX fields, retrying each phase across the
 * snb_b_fdi_train_param[] vswing table.  Composite sync is required on
 * both sides of the link.
 */
2697235783Skibstatic void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
2698235783Skib{
2699235783Skib	struct drm_device *dev = crtc->dev;
2700235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
2701235783Skib	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2702235783Skib	int pipe = intel_crtc->pipe;
2703235783Skib	u32 reg, temp, i;
2704235783Skib
2705235783Skib	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
2706235783Skib	   for train result */
2707235783Skib	reg = FDI_RX_IMR(pipe);
2708235783Skib	temp = I915_READ(reg);
2709235783Skib	temp &= ~FDI_RX_SYMBOL_LOCK;
2710235783Skib	temp &= ~FDI_RX_BIT_LOCK;
2711235783Skib	I915_WRITE(reg, temp);
2712235783Skib
2713235783Skib	POSTING_READ(reg);
2714235783Skib	DELAY(150);
2715235783Skib
2716235783Skib	/* enable CPU FDI TX and PCH FDI RX */
2717235783Skib	reg = FDI_TX_CTL(pipe);
2718235783Skib	temp = I915_READ(reg);
	/* Lane count field: bits 21:19, value = lanes - 1. */
2719235783Skib	temp &= ~(7 << 19);
2720235783Skib	temp |= (intel_crtc->fdi_lanes - 1) << 19;
2721235783Skib	temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
2722235783Skib	temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
2723235783Skib	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2724235783Skib	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2725235783Skib	temp |= FDI_COMPOSITE_SYNC;
2726235783Skib	I915_WRITE(reg, temp | FDI_TX_ENABLE);
2727235783Skib
2728235783Skib	reg = FDI_RX_CTL(pipe);
2729235783Skib	temp = I915_READ(reg);
2730235783Skib	temp &= ~FDI_LINK_TRAIN_AUTO;
2731235783Skib	temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2732235783Skib	temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2733235783Skib	temp |= FDI_COMPOSITE_SYNC;
2734235783Skib	I915_WRITE(reg, temp | FDI_RX_ENABLE);
2735235783Skib
2736235783Skib	POSTING_READ(reg);
2737235783Skib	DELAY(150);
2738235783Skib
	/* Phase 1: retry across the vswing table until bit lock. */
2739235783Skib	for (i = 0; i < 4; i++) {
2740235783Skib		reg = FDI_TX_CTL(pipe);
2741235783Skib		temp = I915_READ(reg);
2742235783Skib		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2743235783Skib		temp |= snb_b_fdi_train_param[i];
2744235783Skib		I915_WRITE(reg, temp);
2745235783Skib
2746235783Skib		POSTING_READ(reg);
2747235783Skib		DELAY(500);
2748235783Skib
2749235783Skib		reg = FDI_RX_IIR(pipe);
2750235783Skib		temp = I915_READ(reg);
2751235783Skib		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2752235783Skib
	/* Re-read IIR in case the lock bit latched just after the first read. */
2753235783Skib		if (temp & FDI_RX_BIT_LOCK ||
2754235783Skib		    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
2755235783Skib			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2756235783Skib			DRM_DEBUG_KMS("FDI train 1 done.\n");
2757235783Skib			break;
2758235783Skib		}
2759235783Skib	}
2760235783Skib	if (i == 4)
2761235783Skib		DRM_ERROR("FDI train 1 fail!\n");
2762235783Skib
2763235783Skib	/* Train 2 */
2764235783Skib	reg = FDI_TX_CTL(pipe);
2765235783Skib	temp = I915_READ(reg);
2766235783Skib	temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2767235783Skib	temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
2768235783Skib	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2769235783Skib	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2770235783Skib	I915_WRITE(reg, temp);
2771235783Skib
2772235783Skib	reg = FDI_RX_CTL(pipe);
2773235783Skib	temp = I915_READ(reg);
2774235783Skib	temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2775235783Skib	temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
2776235783Skib	I915_WRITE(reg, temp);
2777235783Skib
2778235783Skib	POSTING_READ(reg);
2779235783Skib	DELAY(150);
2780235783Skib
	/* Phase 2: retry across the vswing table until symbol lock. */
2781235783Skib	for (i = 0; i < 4; i++ ) {
2782235783Skib		reg = FDI_TX_CTL(pipe);
2783235783Skib		temp = I915_READ(reg);
2784235783Skib		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2785235783Skib		temp |= snb_b_fdi_train_param[i];
2786235783Skib		I915_WRITE(reg, temp);
2787235783Skib
2788235783Skib		POSTING_READ(reg);
2789235783Skib		DELAY(500);
2790235783Skib
2791235783Skib		reg = FDI_RX_IIR(pipe);
2792235783Skib		temp = I915_READ(reg);
2793235783Skib		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2794235783Skib
2795235783Skib		if (temp & FDI_RX_SYMBOL_LOCK) {
2796235783Skib			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2797235783Skib			DRM_DEBUG_KMS("FDI train 2 done.\n");
2798235783Skib			break;
2799235783Skib		}
2800235783Skib	}
2801235783Skib	if (i == 4)
2802235783Skib		DRM_ERROR("FDI train 2 fail!\n");
2803235783Skib
2804235783Skib	DRM_DEBUG_KMS("FDI train done.\n");
2805235783Skib}
2806235783Skib
/*
 * ironlake_fdi_pll_enable - bring up the FDI PLLs for this pipe: program
 * the RX TU size (needed for error detection), enable the PCH FDI RX PLL,
 * switch the RX clock source from Rawclk to PCDclk, and make sure the CPU
 * FDI TX PLL is on.  Each step is flushed with a posting read followed by
 * a warmup delay.
 */
2807235783Skibstatic void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
2808235783Skib{
2809235783Skib	struct drm_device *dev = crtc->dev;
2810235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
2811235783Skib	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2812235783Skib	int pipe = intel_crtc->pipe;
2813235783Skib	u32 reg, temp;
2814235783Skib
2815235783Skib	/* Write the TU size bits so error detection works */
2816235783Skib	I915_WRITE(FDI_RX_TUSIZE1(pipe),
2817235783Skib		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
2818235783Skib
2819235783Skib	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
2820235783Skib	reg = FDI_RX_CTL(pipe);
2821235783Skib	temp = I915_READ(reg);
	/* Clear lane count (21:19) and bpc (18:16), then set lanes and
	 * copy the bpc currently programmed in PIPECONF. */
2822235783Skib	temp &= ~((0x7 << 19) | (0x7 << 16));
2823235783Skib	temp |= (intel_crtc->fdi_lanes - 1) << 19;
2824235783Skib	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2825235783Skib	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
2826235783Skib
2827235783Skib	POSTING_READ(reg);
2828235783Skib	DELAY(200);
2829235783Skib
2830235783Skib	/* Switch from Rawclk to PCDclk */
2831235783Skib	temp = I915_READ(reg);
2832235783Skib	I915_WRITE(reg, temp | FDI_PCDCLK);
2833235783Skib
2834235783Skib	POSTING_READ(reg);
2835235783Skib	DELAY(200);
2836235783Skib
2837235783Skib	/* Enable CPU FDI TX PLL, always on for Ironlake */
2838235783Skib	reg = FDI_TX_CTL(pipe);
2839235783Skib	temp = I915_READ(reg);
2840235783Skib	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
2841235783Skib		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
2842235783Skib
2843235783Skib		POSTING_READ(reg);
2844235783Skib		DELAY(100);
2845235783Skib	}
2846235783Skib}
2847235783Skib
/*
 * cpt_phase_pointer_disable - inverse of cpt_phase_pointer_enable():
 * clear the FDI phase-sync enable bit first, then the override bit to
 * re-lock, on Cougarpoint PCHs.
 */
2848235783Skibstatic void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
2849235783Skib{
2850235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
2851235783Skib	u32 flags = I915_READ(SOUTH_CHICKEN1);
2852235783Skib
2853235783Skib	flags &= ~(FDI_PHASE_SYNC_EN(pipe));
2854235783Skib	I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */
2855235783Skib	flags &= ~(FDI_PHASE_SYNC_OVR(pipe));
2856235783Skib	I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */
2857235783Skib	POSTING_READ(SOUTH_CHICKEN1);
2858235783Skib}
2859235783Skib
/*
 * ironlake_fdi_disable - turn off the CPU FDI transmitter and the PCH FDI
 * receiver for this crtc's pipe, apply the Ibexpeak/Cougarpoint clock
 * pointer teardown workarounds, and leave both link ends parked in
 * training pattern 1 so a later mode set can retrain from scratch.
 */
2860235783Skibstatic void ironlake_fdi_disable(struct drm_crtc *crtc)
2861235783Skib{
2862235783Skib	struct drm_device *dev = crtc->dev;
2863235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
2864235783Skib	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2865235783Skib	int pipe = intel_crtc->pipe;
2866235783Skib	u32 reg, temp;
2867235783Skib
2868235783Skib	/* disable CPU FDI tx and PCH FDI rx */
2869235783Skib	reg = FDI_TX_CTL(pipe);
2870235783Skib	temp = I915_READ(reg);
2871235783Skib	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
2872235783Skib	POSTING_READ(reg);
2873235783Skib
2874235783Skib	reg = FDI_RX_CTL(pipe);
2875235783Skib	temp = I915_READ(reg);
	/* Keep RX bpc (18:16) in sync with PIPECONF while disabling. */
2876235783Skib	temp &= ~(0x7 << 16);
2877235783Skib	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2878235783Skib	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
2879235783Skib
2880235783Skib	POSTING_READ(reg);
2881235783Skib	DELAY(100);
2882235783Skib
2883235783Skib	/* Ironlake workaround, disable clock pointer after downing FDI */
2884235783Skib	if (HAS_PCH_IBX(dev)) {
2885235783Skib		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
		/*
		 * Fixed misplaced parenthesis: the mask must be applied to
		 * the value read back from FDI_RX_CHICKEN, not to the
		 * register offset.  The old code read a bogus register and
		 * never cleared FDI_RX_PHASE_SYNC_POINTER_EN.
		 */
2886235783Skib		I915_WRITE(FDI_RX_CHICKEN(pipe),
2887235783Skib			   I915_READ(FDI_RX_CHICKEN(pipe)) &
2888235783Skib				     ~FDI_RX_PHASE_SYNC_POINTER_EN);
2889235783Skib	} else if (HAS_PCH_CPT(dev)) {
2890235783Skib		cpt_phase_pointer_disable(dev, pipe);
2891235783Skib	}
2892235783Skib
2893235783Skib	/* still set train pattern 1 */
2894235783Skib	reg = FDI_TX_CTL(pipe);
2895235783Skib	temp = I915_READ(reg);
2896235783Skib	temp &= ~FDI_LINK_TRAIN_NONE;
2897235783Skib	temp |= FDI_LINK_TRAIN_PATTERN_1;
2898235783Skib	I915_WRITE(reg, temp);
2899235783Skib
2900235783Skib	reg = FDI_RX_CTL(pipe);
2901235783Skib	temp = I915_READ(reg);
2902235783Skib	if (HAS_PCH_CPT(dev)) {
2903235783Skib		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2904235783Skib		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2905235783Skib	} else {
2906235783Skib		temp &= ~FDI_LINK_TRAIN_NONE;
2907235783Skib		temp |= FDI_LINK_TRAIN_PATTERN_1;
2908235783Skib	}
2909235783Skib	/* BPC in FDI rx is consistent with that in PIPECONF */
2910235783Skib	temp &= ~(0x07 << 16);
2911235783Skib	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2912235783Skib	I915_WRITE(reg, temp);
2913235783Skib
2914235783Skib	POSTING_READ(reg);
2915235783Skib	DELAY(100);
2916235783Skib}
2917235783Skib
2918235783Skib/*
2919235783Skib * When we disable a pipe, we need to clear any pending scanline wait events
2920235783Skib * to avoid hanging the ring, which we assume we are waiting on.
2921235783Skib */
2922235783Skibstatic void intel_clear_scanline_wait(struct drm_device *dev)
2923235783Skib{
2924235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
2925235783Skib	struct intel_ring_buffer *ring;
2926235783Skib	u32 tmp;
2927235783Skib
2928235783Skib	if (IS_GEN2(dev))
2929235783Skib		/* Can't break the hang on i8xx */
2930235783Skib		return;
2931235783Skib
2932235783Skib	ring = LP_RING(dev_priv);
2933235783Skib	tmp = I915_READ_CTL(ring);
	/* Rewriting the CTL register kicks the ring out of a MI_WAIT. */
2934235783Skib	if (tmp & RING_WAIT)
2935235783Skib		I915_WRITE_CTL(ring, tmp);
2936235783Skib}
2937235783Skib
/*
 * intel_crtc_wait_for_pending_flips - block until all page flips queued
 * against this crtc's current framebuffer object have completed.  Sleeps
 * on obj->pending_flip under the event lock; flip completion wakes us.
 * No-op when no framebuffer is bound.
 */
2938235783Skibstatic void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
2939235783Skib{
2940235783Skib	struct drm_i915_gem_object *obj;
2941235783Skib	struct drm_i915_private *dev_priv;
2942235783Skib	struct drm_device *dev;
2943235783Skib
2944235783Skib	if (crtc->fb == NULL)
2945235783Skib		return;
2946235783Skib
2947235783Skib	obj = to_intel_framebuffer(crtc->fb)->obj;
2948235783Skib	dev = crtc->dev;
2949235783Skib	dev_priv = dev->dev_private;
2950235783Skib	mtx_lock(&dev->event_lock);
2951255013Sjkim	while (atomic_load_acq_int(&obj->pending_flip) != 0)
2952235783Skib		msleep(&obj->pending_flip, &dev->event_lock, 0, "915wfl", 0);
2953235783Skib	mtx_unlock(&dev->event_lock);
2954235783Skib}
2955235783Skib
/*
 * intel_crtc_driving_pch - report whether this crtc drives a PCH port.
 * Scans the encoders attached to the crtc; a CPU (non-PCH) eDP output
 * means the crtc is not driving through the PCH.  All other encoder
 * types on this crtc are treated as PCH-driven.
 */
2956235783Skibstatic bool intel_crtc_driving_pch(struct drm_crtc *crtc)
2957235783Skib{
2958235783Skib	struct drm_device *dev = crtc->dev;
2959235783Skib	struct drm_mode_config *mode_config = &dev->mode_config;
2960235783Skib	struct intel_encoder *encoder;
2961235783Skib
2962235783Skib	/*
2963235783Skib	 * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
2964235783Skib	 * must be driven by its own crtc; no sharing is possible.
2965235783Skib	 */
2966235783Skib	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
2967235783Skib		if (encoder->base.crtc != crtc)
2968235783Skib			continue;
2969235783Skib
2970235783Skib		switch (encoder->type) {
2971235783Skib		case INTEL_OUTPUT_EDP:
2972235783Skib			if (!intel_encoder_is_pch_edp(&encoder->base))
2973235783Skib				return false;
	/* The continue targets the encoder loop, not the switch. */
2974235783Skib			continue;
2975235783Skib		}
2976235783Skib	}
2977235783Skib
2978235783Skib	return true;
2979235783Skib}
2980235783Skib
2981235783Skib/*
2982235783Skib * Enable PCH resources required for PCH ports:
2983235783Skib *   - PCH PLLs
2984235783Skib *   - FDI training & RX/TX
2985235783Skib *   - update transcoder timings
2986235783Skib *   - DP transcoding bits
2987235783Skib *   - transcoder
2988235783Skib */
2989235783Skibstatic void ironlake_pch_enable(struct drm_crtc *crtc)
2990235783Skib{
2991235783Skib	struct drm_device *dev = crtc->dev;
2992235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
2993235783Skib	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2994235783Skib	int pipe = intel_crtc->pipe;
2995235783Skib	u32 reg, temp, transc_sel;
2996235783Skib
2997235783Skib	/* For PCH output, training FDI link */
	/* Platform-specific trainer (ILK/SNB/IVB) installed at init time. */
2998235783Skib	dev_priv->display.fdi_link_train(crtc);
2999235783Skib
3000235783Skib	intel_enable_pch_pll(dev_priv, pipe);
3001235783Skib
3002235783Skib	if (HAS_PCH_CPT(dev)) {
	/* Transcoder C may borrow either DPLL; A and B are fixed. */
3003235783Skib		transc_sel = intel_crtc->use_pll_a ? TRANSC_DPLLA_SEL :
3004235783Skib			TRANSC_DPLLB_SEL;
3005235783Skib
3006235783Skib		/* Be sure PCH DPLL SEL is set */
3007235783Skib		temp = I915_READ(PCH_DPLL_SEL);
3008235783Skib		if (pipe == 0) {
3009235783Skib			temp &= ~(TRANSA_DPLLB_SEL);
3010235783Skib			temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
3011235783Skib		} else if (pipe == 1) {
3012235783Skib			temp &= ~(TRANSB_DPLLB_SEL);
3013235783Skib			temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
3014235783Skib		} else if (pipe == 2) {
3015235783Skib			temp &= ~(TRANSC_DPLLB_SEL);
3016235783Skib			temp |= (TRANSC_DPLL_ENABLE | transc_sel);
3017235783Skib		}
3018235783Skib		I915_WRITE(PCH_DPLL_SEL, temp);
3019235783Skib	}
3020235783Skib
3021235783Skib	/* set transcoder timing, panel must allow it */
3022235783Skib	assert_panel_unlocked(dev_priv, pipe);
	/* Copy the CPU pipe timings into the matching PCH transcoder. */
3023235783Skib	I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe)));
3024235783Skib	I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe)));
3025235783Skib	I915_WRITE(TRANS_HSYNC(pipe),  I915_READ(HSYNC(pipe)));
3026235783Skib
3027235783Skib	I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
3028235783Skib	I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
3029235783Skib	I915_WRITE(TRANS_VSYNC(pipe),  I915_READ(VSYNC(pipe)));
3030235783Skib	I915_WRITE(TRANS_VSYNCSHIFT(pipe),  I915_READ(VSYNCSHIFT(pipe)));
3031235783Skib
3032235783Skib	intel_fdi_normal_train(crtc);
3033235783Skib
3034235783Skib	/* For PCH DP, enable TRANS_DP_CTL */
3035235783Skib	if (HAS_PCH_CPT(dev) &&
3036235783Skib	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
3037235783Skib	     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
3038235783Skib		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5;
3039235783Skib		reg = TRANS_DP_CTL(pipe);
3040235783Skib		temp = I915_READ(reg);
3041235783Skib		temp &= ~(TRANS_DP_PORT_SEL_MASK |
3042235783Skib			  TRANS_DP_SYNC_MASK |
3043235783Skib			  TRANS_DP_BPC_MASK);
3044235783Skib		temp |= (TRANS_DP_OUTPUT_ENABLE |
3045235783Skib			 TRANS_DP_ENH_FRAMING);
3046235783Skib		temp |= bpc << 9; /* same format but at 11:9 */
3047235783Skib
3048235783Skib		if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
3049235783Skib			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
3050235783Skib		if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
3051235783Skib			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
3052235783Skib
	/* Route the transcoder to whichever PCH DP port is attached. */
3053235783Skib		switch (intel_trans_dp_port_sel(crtc)) {
3054235783Skib		case PCH_DP_B:
3055235783Skib			temp |= TRANS_DP_PORT_SEL_B;
3056235783Skib			break;
3057235783Skib		case PCH_DP_C:
3058235783Skib			temp |= TRANS_DP_PORT_SEL_C;
3059235783Skib			break;
3060235783Skib		case PCH_DP_D:
3061235783Skib			temp |= TRANS_DP_PORT_SEL_D;
3062235783Skib			break;
3063235783Skib		default:
3064235783Skib			DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
3065235783Skib			temp |= TRANS_DP_PORT_SEL_B;
3066235783Skib			break;
3067235783Skib		}
3068235783Skib
3069235783Skib		I915_WRITE(reg, temp);
3070235783Skib	}
3071235783Skib
3072235783Skib	intel_enable_transcoder(dev_priv, pipe);
3073235783Skib}
3074235783Skib
/*
 * intel_cpt_verify_modeset - verify the pipe started scanning out after a
 * Cougarpoint mode set by watching PIPEDSL (current scanline) advance.
 * If it is stuck, toggle TRANS_AUTOTRAIN_GEN_STALL_DIS in TRANS_CHICKEN2
 * as a workaround and re-check; log an error if the pipe is still stuck.
 */
3075235783Skibvoid intel_cpt_verify_modeset(struct drm_device *dev, int pipe)
3076235783Skib{
3077235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
3078235783Skib	int dslreg = PIPEDSL(pipe), tc2reg = TRANS_CHICKEN2(pipe);
3079235783Skib	u32 temp;
3080235783Skib
3081235783Skib	temp = I915_READ(dslreg);
3082235783Skib	DELAY(500);
	/* Scanline counter should have moved within the wait window. */
3083235783Skib	if (_intel_wait_for(dev, I915_READ(dslreg) != temp, 5, 1, "915cp1")) {
3084235783Skib		/* Without this, mode sets may fail silently on FDI */
3085235783Skib		I915_WRITE(tc2reg, TRANS_AUTOTRAIN_GEN_STALL_DIS);
3086235783Skib		DELAY(250);
3087235783Skib		I915_WRITE(tc2reg, 0);
3088235783Skib		if (_intel_wait_for(dev, I915_READ(dslreg) != temp, 5, 1,
3089235783Skib		    "915cp2"))
3090235783Skib			DRM_ERROR("mode set failed: pipe %d stuck\n", pipe);
3091235783Skib	}
3092235783Skib}
3093235783Skib
/*
 * ironlake_crtc_enable - full power-up sequence for a crtc on PCH
 * platforms: FDI PLLs (if a PCH port is driven), panel fitter for
 * LVDS/eDP, pipe, plane, PCH resources, gamma LUT, FBC, and cursor.
 * Idempotent: returns immediately if the crtc is already active.
 */
3094235783Skibstatic void ironlake_crtc_enable(struct drm_crtc *crtc)
3095235783Skib{
3096235783Skib	struct drm_device *dev = crtc->dev;
3097235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
3098235783Skib	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3099235783Skib	int pipe = intel_crtc->pipe;
3100235783Skib	int plane = intel_crtc->plane;
3101235783Skib	u32 temp;
3102235783Skib	bool is_pch_port;
3103235783Skib
3104235783Skib	if (intel_crtc->active)
3105235783Skib		return;
3106235783Skib
3107235783Skib	intel_crtc->active = true;
3108235783Skib	intel_update_watermarks(dev);
3109235783Skib
	/* LVDS port must be on before the pipe/FDI are brought up. */
3110235783Skib	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
3111235783Skib		temp = I915_READ(PCH_LVDS);
3112235783Skib		if ((temp & LVDS_PORT_EN) == 0)
3113235783Skib			I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
3114235783Skib	}
3115235783Skib
3116235783Skib	is_pch_port = intel_crtc_driving_pch(crtc);
3117235783Skib
3118235783Skib	if (is_pch_port)
3119235783Skib		ironlake_fdi_pll_enable(crtc);
3120235783Skib	else
3121235783Skib		ironlake_fdi_disable(crtc);
3122235783Skib
3123235783Skib	/* Enable panel fitting for LVDS */
3124235783Skib	if (dev_priv->pch_pf_size &&
3125235783Skib	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
3126235783Skib		/* Force use of hard-coded filter coefficients
3127235783Skib		 * as some pre-programmed values are broken,
3128235783Skib		 * e.g. x201.
3129235783Skib		 */
3130235783Skib		I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
3131235783Skib		I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
3132235783Skib		I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
3133235783Skib	}
3134235783Skib
3135235783Skib	intel_enable_pipe(dev_priv, pipe, is_pch_port);
3136235783Skib	intel_enable_plane(dev_priv, plane, pipe);
3137235783Skib
3138235783Skib	if (is_pch_port)
3139235783Skib		ironlake_pch_enable(crtc);
3140235783Skib
3141235783Skib	intel_crtc_load_lut(crtc);
3142235783Skib
	/* FBC state is protected by the struct_mutex equivalent. */
3143235783Skib	DRM_LOCK(dev);
3144235783Skib	intel_update_fbc(dev);
3145235783Skib	DRM_UNLOCK(dev);
3146235783Skib
3147235783Skib	intel_crtc_update_cursor(crtc, true);
3148235783Skib}
3149235783Skib
3150235783Skibstatic void ironlake_crtc_disable(struct drm_crtc *crtc)
3151235783Skib{
3152235783Skib	struct drm_device *dev = crtc->dev;
3153235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
3154235783Skib	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3155235783Skib	int pipe = intel_crtc->pipe;
3156235783Skib	int plane = intel_crtc->plane;
3157235783Skib	u32 reg, temp;
3158235783Skib
3159235783Skib	if (!intel_crtc->active)
3160235783Skib		return;
3161235783Skib
3162235783Skib	intel_crtc_wait_for_pending_flips(crtc);
3163235783Skib	drm_vblank_off(dev, pipe);
3164235783Skib	intel_crtc_update_cursor(crtc, false);
3165235783Skib
3166235783Skib	intel_disable_plane(dev_priv, plane, pipe);
3167235783Skib
3168235783Skib	if (dev_priv->cfb_plane == plane)
3169235783Skib		intel_disable_fbc(dev);
3170235783Skib
3171235783Skib	intel_disable_pipe(dev_priv, pipe);
3172235783Skib
3173235783Skib	/* Disable PF */
3174235783Skib	I915_WRITE(PF_CTL(pipe), 0);
3175235783Skib	I915_WRITE(PF_WIN_SZ(pipe), 0);
3176235783Skib
3177235783Skib	ironlake_fdi_disable(crtc);
3178235783Skib
3179235783Skib	/* This is a horrible layering violation; we should be doing this in
3180235783Skib	 * the connector/encoder ->prepare instead, but we don't always have
3181235783Skib	 * enough information there about the config to know whether it will
3182235783Skib	 * actually be necessary or just cause undesired flicker.
3183235783Skib	 */
3184235783Skib	intel_disable_pch_ports(dev_priv, pipe);
3185235783Skib
3186235783Skib	intel_disable_transcoder(dev_priv, pipe);
3187235783Skib
3188235783Skib	if (HAS_PCH_CPT(dev)) {
3189235783Skib		/* disable TRANS_DP_CTL */
3190235783Skib		reg = TRANS_DP_CTL(pipe);
3191235783Skib		temp = I915_READ(reg);
3192235783Skib		temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
3193235783Skib		temp |= TRANS_DP_PORT_SEL_NONE;
3194235783Skib		I915_WRITE(reg, temp);
3195235783Skib
3196235783Skib		/* disable DPLL_SEL */
3197235783Skib		temp = I915_READ(PCH_DPLL_SEL);
3198235783Skib		switch (pipe) {
3199235783Skib		case 0:
3200235783Skib			temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
3201235783Skib			break;
3202235783Skib		case 1:
3203235783Skib			temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
3204235783Skib			break;
3205235783Skib		case 2:
3206235783Skib			/* C shares PLL A or B */
3207235783Skib			temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL);
3208235783Skib			break;
3209235783Skib		default:
3210235783Skib			KASSERT(1, ("Wrong pipe %d", pipe)); /* wtf */
3211235783Skib		}
3212235783Skib		I915_WRITE(PCH_DPLL_SEL, temp);
3213235783Skib	}
3214235783Skib
3215235783Skib	/* disable PCH DPLL */
3216235783Skib	if (!intel_crtc->no_pll)
3217235783Skib		intel_disable_pch_pll(dev_priv, pipe);
3218235783Skib
3219235783Skib	/* Switch from PCDclk to Rawclk */
3220235783Skib	reg = FDI_RX_CTL(pipe);
3221235783Skib	temp = I915_READ(reg);
3222235783Skib	I915_WRITE(reg, temp & ~FDI_PCDCLK);
3223235783Skib
3224235783Skib	/* Disable CPU FDI TX PLL */
3225235783Skib	reg = FDI_TX_CTL(pipe);
3226235783Skib	temp = I915_READ(reg);
3227235783Skib	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
3228235783Skib
3229235783Skib	POSTING_READ(reg);
3230235783Skib	DELAY(100);
3231235783Skib
3232235783Skib	reg = FDI_RX_CTL(pipe);
3233235783Skib	temp = I915_READ(reg);
3234235783Skib	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
3235235783Skib
3236235783Skib	/* Wait for the clocks to turn off. */
3237235783Skib	POSTING_READ(reg);
3238235783Skib	DELAY(100);
3239235783Skib
3240235783Skib	intel_crtc->active = false;
3241235783Skib	intel_update_watermarks(dev);
3242235783Skib
3243235783Skib	DRM_LOCK(dev);
3244235783Skib	intel_update_fbc(dev);
3245235783Skib	intel_clear_scanline_wait(dev);
3246235783Skib	DRM_UNLOCK(dev);
3247235783Skib}
3248235783Skib
3249235783Skibstatic void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
3250235783Skib{
3251235783Skib	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3252235783Skib	int pipe = intel_crtc->pipe;
3253235783Skib	int plane = intel_crtc->plane;
3254235783Skib
3255235783Skib	/* XXX: When our outputs are all unaware of DPMS modes other than off
3256235783Skib	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
3257235783Skib	 */
3258235783Skib	switch (mode) {
3259235783Skib	case DRM_MODE_DPMS_ON:
3260235783Skib	case DRM_MODE_DPMS_STANDBY:
3261235783Skib	case DRM_MODE_DPMS_SUSPEND:
3262235783Skib		DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
3263235783Skib		ironlake_crtc_enable(crtc);
3264235783Skib		break;
3265235783Skib
3266235783Skib	case DRM_MODE_DPMS_OFF:
3267235783Skib		DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
3268235783Skib		ironlake_crtc_disable(crtc);
3269235783Skib		break;
3270235783Skib	}
3271235783Skib}
3272235783Skib
3273235783Skibstatic void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
3274235783Skib{
3275235783Skib	if (!enable && intel_crtc->overlay) {
3276235783Skib		struct drm_device *dev = intel_crtc->base.dev;
3277235783Skib		struct drm_i915_private *dev_priv = dev->dev_private;
3278235783Skib
3279235783Skib		DRM_LOCK(dev);
3280235783Skib		dev_priv->mm.interruptible = false;
3281235783Skib		(void) intel_overlay_switch_off(intel_crtc->overlay);
3282235783Skib		dev_priv->mm.interruptible = true;
3283235783Skib		DRM_UNLOCK(dev);
3284235783Skib	}
3285235783Skib
3286235783Skib	/* Let userspace switch the overlay on again. In most cases userspace
3287235783Skib	 * has to recompute where to put it anyway.
3288235783Skib	 */
3289235783Skib}
3290235783Skib
/*
 * Power up a pipe/plane pair on non-PCH (pre-Ironlake) hardware.
 *
 * Order matters: DPLL first, then pipe, then plane.  Idempotent via
 * intel_crtc->active.
 */
static void i9xx_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;

	/* Already running: nothing to do. */
	if (intel_crtc->active)
		return;

	intel_crtc->active = true;
	/* Recompute watermarks now that another pipe is active. */
	intel_update_watermarks(dev);

	/* Clock first, then pipe, then plane. */
	intel_enable_pll(dev_priv, pipe);
	intel_enable_pipe(dev_priv, pipe, false);
	intel_enable_plane(dev_priv, plane, pipe);

	intel_crtc_load_lut(crtc);
	intel_update_fbc(dev);

	/* Give the overlay scaler a chance to enable if it's on this pipe */
	intel_crtc_dpms_overlay(intel_crtc, true);
	intel_crtc_update_cursor(crtc, true);
}
3316235783Skib
/*
 * Power down a pipe/plane pair on non-PCH (pre-Ironlake) hardware, in
 * the reverse of the enable order: plane, pipe, DPLL.  Idempotent via
 * intel_crtc->active.
 */
static void i9xx_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;

	/* Already off: nothing to do. */
	if (!intel_crtc->active)
		return;

	/* Give the overlay scaler a chance to disable if it's on this pipe */
	intel_crtc_wait_for_pending_flips(crtc);
	drm_vblank_off(dev, pipe);
	intel_crtc_dpms_overlay(intel_crtc, false);
	intel_crtc_update_cursor(crtc, false);

	/* FBC must not stay attached to a plane that is going away. */
	if (dev_priv->cfb_plane == plane)
		intel_disable_fbc(dev);

	intel_disable_plane(dev_priv, plane, pipe);
	intel_disable_pipe(dev_priv, pipe);
	intel_disable_pll(dev_priv, pipe);

	intel_crtc->active = false;
	intel_update_fbc(dev);
	intel_update_watermarks(dev);
	intel_clear_scanline_wait(dev);
}
3346235783Skib
3347235783Skibstatic void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
3348235783Skib{
3349235783Skib	/* XXX: When our outputs are all unaware of DPMS modes other than off
3350235783Skib	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
3351235783Skib	 */
3352235783Skib	switch (mode) {
3353235783Skib	case DRM_MODE_DPMS_ON:
3354235783Skib	case DRM_MODE_DPMS_STANDBY:
3355235783Skib	case DRM_MODE_DPMS_SUSPEND:
3356235783Skib		i9xx_crtc_enable(crtc);
3357235783Skib		break;
3358235783Skib	case DRM_MODE_DPMS_OFF:
3359235783Skib		i9xx_crtc_disable(crtc);
3360235783Skib		break;
3361235783Skib	}
3362235783Skib}
3363235783Skib
/**
 * Sets the power management mode of the pipe and plane.
 *
 * Dispatches to the platform dpms hook (dev_priv->display.dpms) and
 * then mirrors the resulting plane dimensions into the SAREA so legacy
 * userspace can see which planes are live.  No-op if the mode does not
 * change.
 */
static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
#if 0
	struct drm_i915_master_private *master_priv;
#endif
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	bool enabled;

	if (intel_crtc->dpms_mode == mode)
		return;

	intel_crtc->dpms_mode = mode;

	/* Platform-specific enable/disable (i9xx_ or ironlake_crtc_dpms). */
	dev_priv->display.dpms(crtc, mode);

	/* The #if 0 path is the Linux per-master sarea lookup; this port
	 * keeps sarea_priv directly in dev_priv instead. */
#if 0
	if (!dev->primary->master)
		return;

	master_priv = dev->primary->master->driver_priv;
	if (!master_priv->sarea_priv)
		return;
#else
	if (!dev_priv->sarea_priv)
		return;
#endif

	enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;

	/* Publish the active mode size (or 0x0 when off) for this pipe. */
	switch (pipe) {
	case 0:
#if 0
		master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
		master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
#else
		dev_priv->sarea_priv->planeA_w = enabled ? crtc->mode.hdisplay : 0;
		dev_priv->sarea_priv->planeA_h = enabled ? crtc->mode.vdisplay : 0;
#endif
		break;
	case 1:
#if 0
		master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
		master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
#else
		dev_priv->sarea_priv->planeB_w = enabled ? crtc->mode.hdisplay : 0;
		dev_priv->sarea_priv->planeB_h = enabled ? crtc->mode.vdisplay : 0;
#endif
		break;
	default:
		DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
		break;
	}
}
3423235783Skib
/*
 * Fully disable a CRTC via its helper dpms hook, verify the hardware
 * reached the disabled state, and unpin the scanout buffer.
 */
static void intel_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
	struct drm_device *dev = crtc->dev;

	/* Flush any pending WAITs before we disable the pipe. Note that
	 * we need to drop the struct_mutex in order to acquire it again
	 * during the lowlevel dpms routines around a couple of the
	 * operations. It does not look trivial nor desirable to move
	 * that locking higher. So instead we leave a window for the
	 * submission of further commands on the fb before we can actually
	 * disable it. This race with userspace exists anyway, and we can
	 * only rely on the pipe being disabled by userspace after it
	 * receives the hotplug notification and has flushed any pending
	 * batches.
	 */
	if (crtc->fb) {
		DRM_LOCK(dev);
		intel_finish_fb(crtc->fb);
		DRM_UNLOCK(dev);
	}

	/* Turn the CRTC off, then sanity-check the hardware state. */
	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
 	assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
	assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);

	/* The scanout buffer is no longer in use; release its pin. */
	if (crtc->fb) {
		DRM_LOCK(dev);
		intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
		DRM_UNLOCK(dev);
	}
}
3456235783Skib
3457235783Skib/* Prepare for a mode set.
3458235783Skib *
3459235783Skib * Note we could be a lot smarter here.  We need to figure out which outputs
3460235783Skib * will be enabled, which disabled (in short, how the config will changes)
3461235783Skib * and perform the minimum necessary steps to accomplish that, e.g. updating
3462235783Skib * watermarks, FBC configuration, making sure PLLs are programmed correctly,
3463235783Skib * panel fitting is in the proper state, etc.
3464235783Skib */
/* CRTC helper ->prepare for non-PCH hardware: disable before a mode set. */
static void i9xx_crtc_prepare(struct drm_crtc *crtc)
{
	i9xx_crtc_disable(crtc);
}
3469235783Skib
/* CRTC helper ->commit for non-PCH hardware: re-enable after a mode set. */
static void i9xx_crtc_commit(struct drm_crtc *crtc)
{
	i9xx_crtc_enable(crtc);
}
3474235783Skib
/* CRTC helper ->prepare for Ironlake+: disable before a mode set. */
static void ironlake_crtc_prepare(struct drm_crtc *crtc)
{
	ironlake_crtc_disable(crtc);
}
3479235783Skib
/* CRTC helper ->commit for Ironlake+: re-enable after a mode set. */
static void ironlake_crtc_commit(struct drm_crtc *crtc)
{
	ironlake_crtc_enable(crtc);
}
3484235783Skib
/*
 * Encoder helper ->prepare: blank the encoder (dpms OFF) ahead of a
 * mode set.
 */
void intel_encoder_prepare(struct drm_encoder *encoder)
{
	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
	/* lvds has its own version of prepare see intel_lvds_prepare */
	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
}
3491235783Skib
3492235783Skibvoid intel_encoder_commit(struct drm_encoder *encoder)
3493235783Skib{
3494235783Skib	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
3495235783Skib	struct drm_device *dev = encoder->dev;
3496235783Skib	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
3497235783Skib	struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
3498235783Skib
3499235783Skib	/* lvds has its own version of commit see intel_lvds_commit */
3500235783Skib	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
3501235783Skib
3502235783Skib	if (HAS_PCH_CPT(dev))
3503235783Skib		intel_cpt_verify_modeset(dev, intel_crtc->pipe);
3504235783Skib}
3505235783Skib
3506235783Skibvoid intel_encoder_destroy(struct drm_encoder *encoder)
3507235783Skib{
3508235783Skib	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
3509235783Skib
3510235783Skib	drm_encoder_cleanup(encoder);
3511235783Skib	free(intel_encoder, DRM_MEM_KMS);
3512235783Skib}
3513235783Skib
3514235783Skibstatic bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
3515254797Sdumbbell				  const struct drm_display_mode *mode,
3516235783Skib				  struct drm_display_mode *adjusted_mode)
3517235783Skib{
3518235783Skib	struct drm_device *dev = crtc->dev;
3519235783Skib
3520235783Skib	if (HAS_PCH_SPLIT(dev)) {
3521235783Skib		/* FDI link clock is fixed at 2.7G */
3522235783Skib		if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4)
3523235783Skib			return false;
3524235783Skib	}
3525235783Skib
3526235783Skib	/* All interlaced capable intel hw wants timings in frames. Note though
3527235783Skib	 * that intel_lvds_mode_fixup does some funny tricks with the crtc
3528235783Skib	 * timings, so we need to be careful not to clobber these.*/
3529235783Skib	if (!(adjusted_mode->private_flags & INTEL_MODE_CRTC_TIMINGS_SET))
3530235783Skib		drm_mode_set_crtcinfo(adjusted_mode, 0);
3531235783Skib
3532235783Skib	return true;
3533235783Skib}
3534235783Skib
/* i945: the core display clock is a fixed 400 MHz (value in kHz). */
static int i945_get_display_clock_speed(struct drm_device *dev)
{
	const int clock_khz = 400000;

	return clock_khz;
}
3539235783Skib
/* i915: the core display clock is a fixed 333 MHz (value in kHz). */
static int i915_get_display_clock_speed(struct drm_device *dev)
{
	const int clock_khz = 333000;

	return clock_khz;
}
3544235783Skib
/* Miscellaneous i9xx parts: fixed 200 MHz display clock (value in kHz). */
static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
{
	const int clock_khz = 200000;

	return clock_khz;
}
3549235783Skib
3550235783Skibstatic int i915gm_get_display_clock_speed(struct drm_device *dev)
3551235783Skib{
3552235783Skib	u16 gcfgc = 0;
3553235783Skib
3554235783Skib	gcfgc = pci_read_config(dev->device, GCFGC, 2);
3555235783Skib
3556235783Skib	if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
3557235783Skib		return 133000;
3558235783Skib	else {
3559235783Skib		switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
3560235783Skib		case GC_DISPLAY_CLOCK_333_MHZ:
3561235783Skib			return 333000;
3562235783Skib		default:
3563235783Skib		case GC_DISPLAY_CLOCK_190_200_MHZ:
3564235783Skib			return 190000;
3565235783Skib		}
3566235783Skib	}
3567235783Skib}
3568235783Skib
/* i865: the core display clock is a fixed 266 MHz (value in kHz). */
static int i865_get_display_clock_speed(struct drm_device *dev)
{
	const int clock_khz = 266000;

	return clock_khz;
}
3573235783Skib
3574235783Skibstatic int i855_get_display_clock_speed(struct drm_device *dev)
3575235783Skib{
3576235783Skib	u16 hpllcc = 0;
3577235783Skib	/* Assume that the hardware is in the high speed state.  This
3578235783Skib	 * should be the default.
3579235783Skib	 */
3580235783Skib	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
3581235783Skib	case GC_CLOCK_133_200:
3582235783Skib	case GC_CLOCK_100_200:
3583235783Skib		return 200000;
3584235783Skib	case GC_CLOCK_166_250:
3585235783Skib		return 250000;
3586235783Skib	case GC_CLOCK_100_133:
3587235783Skib		return 133000;
3588235783Skib	}
3589235783Skib
3590235783Skib	/* Shouldn't happen */
3591235783Skib	return 0;
3592235783Skib}
3593235783Skib
/* i830: the core display clock is a fixed 133 MHz (value in kHz). */
static int i830_get_display_clock_speed(struct drm_device *dev)
{
	const int clock_khz = 133000;

	return clock_khz;
}
3598235783Skib
/* FDI link M/N ratio values; filled in by ironlake_compute_m_n(). */
struct fdi_m_n {
	u32        tu;		/* TU size; ironlake_compute_m_n uses 64 */
	u32        gmch_m;	/* data M: bits-per-pixel * pixel clock */
	u32        gmch_n;	/* data N: link clock * lane count * 8 */
	u32        link_m;	/* link M: pixel clock */
	u32        link_n;	/* link N: link clock */
};
3606235783Skib
3607235783Skibstatic void
3608235783Skibfdi_reduce_ratio(u32 *num, u32 *den)
3609235783Skib{
3610235783Skib	while (*num > 0xffffff || *den > 0xffffff) {
3611235783Skib		*num >>= 1;
3612235783Skib		*den >>= 1;
3613235783Skib	}
3614235783Skib}
3615235783Skib
3616235783Skibstatic void
3617235783Skibironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
3618235783Skib		     int link_clock, struct fdi_m_n *m_n)
3619235783Skib{
3620235783Skib	m_n->tu = 64; /* default size */
3621235783Skib
3622235783Skib	/* BUG_ON(pixel_clock > INT_MAX / 36); */
3623235783Skib	m_n->gmch_m = bits_per_pixel * pixel_clock;
3624235783Skib	m_n->gmch_n = link_clock * nlanes * 8;
3625235783Skib	fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
3626235783Skib
3627235783Skib	m_n->link_m = pixel_clock;
3628235783Skib	m_n->link_n = link_clock;
3629235783Skib	fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
3630235783Skib}
3631235783Skib
3632235783Skib
/* Per-platform FIFO/watermark limits consumed by intel_calculate_wm(). */
struct intel_watermark_params {
	unsigned long fifo_size;	/* total FIFO size */
	unsigned long max_wm;		/* upper clamp for the computed wm */
	unsigned long default_wm;	/* fallback when computed wm <= 0 */
	unsigned long guard_size;	/* extra entries kept in reserve */
	unsigned long cacheline_size;	/* FIFO line size used for rounding */
};
3640235783Skib
3641235783Skib/* Pineview has different values for various configs */
3642235783Skibstatic const struct intel_watermark_params pineview_display_wm = {
3643235783Skib	PINEVIEW_DISPLAY_FIFO,
3644235783Skib	PINEVIEW_MAX_WM,
3645235783Skib	PINEVIEW_DFT_WM,
3646235783Skib	PINEVIEW_GUARD_WM,
3647235783Skib	PINEVIEW_FIFO_LINE_SIZE
3648235783Skib};
3649235783Skibstatic const struct intel_watermark_params pineview_display_hplloff_wm = {
3650235783Skib	PINEVIEW_DISPLAY_FIFO,
3651235783Skib	PINEVIEW_MAX_WM,
3652235783Skib	PINEVIEW_DFT_HPLLOFF_WM,
3653235783Skib	PINEVIEW_GUARD_WM,
3654235783Skib	PINEVIEW_FIFO_LINE_SIZE
3655235783Skib};
3656235783Skibstatic const struct intel_watermark_params pineview_cursor_wm = {
3657235783Skib	PINEVIEW_CURSOR_FIFO,
3658235783Skib	PINEVIEW_CURSOR_MAX_WM,
3659235783Skib	PINEVIEW_CURSOR_DFT_WM,
3660235783Skib	PINEVIEW_CURSOR_GUARD_WM,
3661235783Skib	PINEVIEW_FIFO_LINE_SIZE,
3662235783Skib};
3663235783Skibstatic const struct intel_watermark_params pineview_cursor_hplloff_wm = {
3664235783Skib	PINEVIEW_CURSOR_FIFO,
3665235783Skib	PINEVIEW_CURSOR_MAX_WM,
3666235783Skib	PINEVIEW_CURSOR_DFT_WM,
3667235783Skib	PINEVIEW_CURSOR_GUARD_WM,
3668235783Skib	PINEVIEW_FIFO_LINE_SIZE
3669235783Skib};
3670235783Skibstatic const struct intel_watermark_params g4x_wm_info = {
3671235783Skib	G4X_FIFO_SIZE,
3672235783Skib	G4X_MAX_WM,
3673235783Skib	G4X_MAX_WM,
3674235783Skib	2,
3675235783Skib	G4X_FIFO_LINE_SIZE,
3676235783Skib};
3677235783Skibstatic const struct intel_watermark_params g4x_cursor_wm_info = {
3678235783Skib	I965_CURSOR_FIFO,
3679235783Skib	I965_CURSOR_MAX_WM,
3680235783Skib	I965_CURSOR_DFT_WM,
3681235783Skib	2,
3682235783Skib	G4X_FIFO_LINE_SIZE,
3683235783Skib};
3684235783Skibstatic const struct intel_watermark_params i965_cursor_wm_info = {
3685235783Skib	I965_CURSOR_FIFO,
3686235783Skib	I965_CURSOR_MAX_WM,
3687235783Skib	I965_CURSOR_DFT_WM,
3688235783Skib	2,
3689235783Skib	I915_FIFO_LINE_SIZE,
3690235783Skib};
3691235783Skibstatic const struct intel_watermark_params i945_wm_info = {
3692235783Skib	I945_FIFO_SIZE,
3693235783Skib	I915_MAX_WM,
3694235783Skib	1,
3695235783Skib	2,
3696235783Skib	I915_FIFO_LINE_SIZE
3697235783Skib};
3698235783Skibstatic const struct intel_watermark_params i915_wm_info = {
3699235783Skib	I915_FIFO_SIZE,
3700235783Skib	I915_MAX_WM,
3701235783Skib	1,
3702235783Skib	2,
3703235783Skib	I915_FIFO_LINE_SIZE
3704235783Skib};
3705235783Skibstatic const struct intel_watermark_params i855_wm_info = {
3706235783Skib	I855GM_FIFO_SIZE,
3707235783Skib	I915_MAX_WM,
3708235783Skib	1,
3709235783Skib	2,
3710235783Skib	I830_FIFO_LINE_SIZE
3711235783Skib};
3712235783Skibstatic const struct intel_watermark_params i830_wm_info = {
3713235783Skib	I830_FIFO_SIZE,
3714235783Skib	I915_MAX_WM,
3715235783Skib	1,
3716235783Skib	2,
3717235783Skib	I830_FIFO_LINE_SIZE
3718235783Skib};
3719235783Skib
3720235783Skibstatic const struct intel_watermark_params ironlake_display_wm_info = {
3721235783Skib	ILK_DISPLAY_FIFO,
3722235783Skib	ILK_DISPLAY_MAXWM,
3723235783Skib	ILK_DISPLAY_DFTWM,
3724235783Skib	2,
3725235783Skib	ILK_FIFO_LINE_SIZE
3726235783Skib};
3727235783Skibstatic const struct intel_watermark_params ironlake_cursor_wm_info = {
3728235783Skib	ILK_CURSOR_FIFO,
3729235783Skib	ILK_CURSOR_MAXWM,
3730235783Skib	ILK_CURSOR_DFTWM,
3731235783Skib	2,
3732235783Skib	ILK_FIFO_LINE_SIZE
3733235783Skib};
3734235783Skibstatic const struct intel_watermark_params ironlake_display_srwm_info = {
3735235783Skib	ILK_DISPLAY_SR_FIFO,
3736235783Skib	ILK_DISPLAY_MAX_SRWM,
3737235783Skib	ILK_DISPLAY_DFT_SRWM,
3738235783Skib	2,
3739235783Skib	ILK_FIFO_LINE_SIZE
3740235783Skib};
3741235783Skibstatic const struct intel_watermark_params ironlake_cursor_srwm_info = {
3742235783Skib	ILK_CURSOR_SR_FIFO,
3743235783Skib	ILK_CURSOR_MAX_SRWM,
3744235783Skib	ILK_CURSOR_DFT_SRWM,
3745235783Skib	2,
3746235783Skib	ILK_FIFO_LINE_SIZE
3747235783Skib};
3748235783Skib
3749235783Skibstatic const struct intel_watermark_params sandybridge_display_wm_info = {
3750235783Skib	SNB_DISPLAY_FIFO,
3751235783Skib	SNB_DISPLAY_MAXWM,
3752235783Skib	SNB_DISPLAY_DFTWM,
3753235783Skib	2,
3754235783Skib	SNB_FIFO_LINE_SIZE
3755235783Skib};
3756235783Skibstatic const struct intel_watermark_params sandybridge_cursor_wm_info = {
3757235783Skib	SNB_CURSOR_FIFO,
3758235783Skib	SNB_CURSOR_MAXWM,
3759235783Skib	SNB_CURSOR_DFTWM,
3760235783Skib	2,
3761235783Skib	SNB_FIFO_LINE_SIZE
3762235783Skib};
3763235783Skibstatic const struct intel_watermark_params sandybridge_display_srwm_info = {
3764235783Skib	SNB_DISPLAY_SR_FIFO,
3765235783Skib	SNB_DISPLAY_MAX_SRWM,
3766235783Skib	SNB_DISPLAY_DFT_SRWM,
3767235783Skib	2,
3768235783Skib	SNB_FIFO_LINE_SIZE
3769235783Skib};
3770235783Skibstatic const struct intel_watermark_params sandybridge_cursor_srwm_info = {
3771235783Skib	SNB_CURSOR_SR_FIFO,
3772235783Skib	SNB_CURSOR_MAX_SRWM,
3773235783Skib	SNB_CURSOR_DFT_SRWM,
3774235783Skib	2,
3775235783Skib	SNB_FIFO_LINE_SIZE
3776235783Skib};
3777235783Skib
3778235783Skib
3779235783Skib/**
3780235783Skib * intel_calculate_wm - calculate watermark level
3781235783Skib * @clock_in_khz: pixel clock
3782235783Skib * @wm: chip FIFO params
3783235783Skib * @pixel_size: display pixel size
3784235783Skib * @latency_ns: memory latency for the platform
3785235783Skib *
3786235783Skib * Calculate the watermark level (the level at which the display plane will
3787235783Skib * start fetching from memory again).  Each chip has a different display
3788235783Skib * FIFO size and allocation, so the caller needs to figure that out and pass
3789235783Skib * in the correct intel_watermark_params structure.
3790235783Skib *
3791235783Skib * As the pixel clock runs, the FIFO will be drained at a rate that depends
3792235783Skib * on the pixel size.  When it reaches the watermark level, it'll start
3793235783Skib * fetching FIFO line sized based chunks from memory until the FIFO fills
3794235783Skib * past the watermark point.  If the FIFO drains completely, a FIFO underrun
3795235783Skib * will occur, and a display engine hang could result.
3796235783Skib */
3797235783Skibstatic unsigned long intel_calculate_wm(unsigned long clock_in_khz,
3798235783Skib					const struct intel_watermark_params *wm,
3799235783Skib					int fifo_size,
3800235783Skib					int pixel_size,
3801235783Skib					unsigned long latency_ns)
3802235783Skib{
3803235783Skib	long entries_required, wm_size;
3804235783Skib
3805235783Skib	/*
3806235783Skib	 * Note: we need to make sure we don't overflow for various clock &
3807235783Skib	 * latency values.
3808235783Skib	 * clocks go from a few thousand to several hundred thousand.
3809235783Skib	 * latency is usually a few thousand
3810235783Skib	 */
3811235783Skib	entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
3812235783Skib		1000;
3813235783Skib	entries_required = howmany(entries_required, wm->cacheline_size);
3814235783Skib
3815235783Skib	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);
3816235783Skib
3817235783Skib	wm_size = fifo_size - (entries_required + wm->guard_size);
3818235783Skib
3819235783Skib	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);
3820235783Skib
3821235783Skib	/* Don't promote wm_size to unsigned... */
3822235783Skib	if (wm_size > (long)wm->max_wm)
3823235783Skib		wm_size = wm->max_wm;
3824235783Skib	if (wm_size <= 0)
3825235783Skib		wm_size = wm->default_wm;
3826235783Skib	return wm_size;
3827235783Skib}
3828235783Skib
/*
 * One row of the CxSR (self-refresh) latency table: chipset/memory
 * configuration keys plus the matching latency values for display and
 * cursor, with and without HPLL disable.
 */
struct cxsr_latency {
	int is_desktop;			/* 1 = desktop part, 0 = mobile */
	int is_ddr3;			/* 1 = DDR3, 0 = DDR2 */
	unsigned long fsb_freq;		/* FSB frequency key (MHz) */
	unsigned long mem_freq;		/* memory frequency key (MHz) */
	unsigned long display_sr;	/* display self-refresh latency */
	unsigned long display_hpll_disable;	/* ... with HPLL disabled */
	unsigned long cursor_sr;	/* cursor self-refresh latency */
	unsigned long cursor_hpll_disable;	/* ... with HPLL disabled */
};
3839235783Skib
/*
 * Row layout (see struct cxsr_latency):
 * { is_desktop, is_ddr3, fsb_freq, mem_freq,
 *   display_sr, display_hpll_disable, cursor_sr, cursor_hpll_disable }
 * Looked up by intel_get_cxsr_latency() on an exact key match.
 */
static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};
3877235783Skib
3878235783Skibstatic const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
3879235783Skib							 int is_ddr3,
3880235783Skib							 int fsb,
3881235783Skib							 int mem)
3882235783Skib{
3883235783Skib	const struct cxsr_latency *latency;
3884235783Skib	int i;
3885235783Skib
3886235783Skib	if (fsb == 0 || mem == 0)
3887235783Skib		return NULL;
3888235783Skib
3889235783Skib	for (i = 0; i < DRM_ARRAY_SIZE(cxsr_latency_table); i++) {
3890235783Skib		latency = &cxsr_latency_table[i];
3891235783Skib		if (is_desktop == latency->is_desktop &&
3892235783Skib		    is_ddr3 == latency->is_ddr3 &&
3893235783Skib		    fsb == latency->fsb_freq && mem == latency->mem_freq)
3894235783Skib			return latency;
3895235783Skib	}
3896235783Skib
3897235783Skib	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
3898235783Skib
3899235783Skib	return NULL;
3900235783Skib}
3901235783Skib
/*
 * Turn off Pineview self-refresh (CxSR) by clearing its enable bit in
 * DSPFW3.
 */
static void pineview_disable_cxsr(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* deactivate cxsr */
	I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
}
3909235783Skib
/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value.  It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
/* Default FIFO-fetch latency assumption, in nanoseconds (5 us). */
static const int latency_ns = 5000;
3925235783Skib
3926235783Skibstatic int i9xx_get_fifo_size(struct drm_device *dev, int plane)
3927235783Skib{
3928235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
3929235783Skib	uint32_t dsparb = I915_READ(DSPARB);
3930235783Skib	int size;
3931235783Skib
3932235783Skib	size = dsparb & 0x7f;
3933235783Skib	if (plane)
3934235783Skib		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
3935235783Skib
3936235783Skib	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3937235783Skib		      plane ? "B" : "A", size);
3938235783Skib
3939235783Skib	return size;
3940235783Skib}
3941235783Skib
3942235783Skibstatic int i85x_get_fifo_size(struct drm_device *dev, int plane)
3943235783Skib{
3944235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
3945235783Skib	uint32_t dsparb = I915_READ(DSPARB);
3946235783Skib	int size;
3947235783Skib
3948235783Skib	size = dsparb & 0x1ff;
3949235783Skib	if (plane)
3950235783Skib		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
3951235783Skib	size >>= 1; /* Convert to cachelines */
3952235783Skib
3953235783Skib	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3954235783Skib		      plane ? "B" : "A", size);
3955235783Skib
3956235783Skib	return size;
3957235783Skib}
3958235783Skib
3959235783Skibstatic int i845_get_fifo_size(struct drm_device *dev, int plane)
3960235783Skib{
3961235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
3962235783Skib	uint32_t dsparb = I915_READ(DSPARB);
3963235783Skib	int size;
3964235783Skib
3965235783Skib	size = dsparb & 0x7f;
3966235783Skib	size >>= 2; /* Convert to cachelines */
3967235783Skib
3968235783Skib	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3969235783Skib		      plane ? "B" : "A",
3970235783Skib		      size);
3971235783Skib
3972235783Skib	return size;
3973235783Skib}
3974235783Skib
3975235783Skibstatic int i830_get_fifo_size(struct drm_device *dev, int plane)
3976235783Skib{
3977235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
3978235783Skib	uint32_t dsparb = I915_READ(DSPARB);
3979235783Skib	int size;
3980235783Skib
3981235783Skib	size = dsparb & 0x7f;
3982235783Skib	size >>= 1; /* Convert to cachelines */
3983235783Skib
3984235783Skib	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3985235783Skib		      plane ? "B" : "A", size);
3986235783Skib
3987235783Skib	return size;
3988235783Skib}
3989235783Skib
3990235783Skibstatic struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
3991235783Skib{
3992235783Skib	struct drm_crtc *crtc, *enabled = NULL;
3993235783Skib
3994235783Skib	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3995235783Skib		if (crtc->enabled && crtc->fb) {
3996235783Skib			if (enabled)
3997235783Skib				return NULL;
3998235783Skib			enabled = crtc;
3999235783Skib		}
4000235783Skib	}
4001235783Skib
4002235783Skib	return enabled;
4003235783Skib}
4004235783Skib
/*
 * Program the Pineview self-refresh (CxSR) watermarks.
 *
 * Looks up the platform latency table; if no entry matches, or if
 * zero/multiple CRTCs are active, self-refresh is disabled.  With a
 * single active CRTC, the display/cursor self-refresh and HPLL-off
 * watermarks are computed for that CRTC's mode, written to
 * DSPFW1/DSPFW3, and self-refresh is enabled last.
 */
static void pineview_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	/* Latencies depend on platform flavor, memory type and clocks. */
	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
					 dev_priv->fsb_freq, dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		pineview_disable_cxsr(dev);
		return;
	}

	crtc = single_enabled_crtc(dev);
	if (crtc) {
		int clock = crtc->mode.clock;
		int pixel_size = crtc->fb->bits_per_pixel / 8;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= wm << DSPFW_SR_SHIFT;
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR (shares the display FIFO size) */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= wm & DSPFW_HPLL_SR_MASK;
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR (also uses the display FIFO size) */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		/* activate cxsr only after all watermarks are programmed */
		I915_WRITE(DSPFW3,
			   I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
		DRM_DEBUG_KMS("Self-refresh is enabled\n");
	} else {
		pineview_disable_cxsr(dev);
		DRM_DEBUG_KMS("Self-refresh is disabled\n");
	}
}
4073235783Skib
/*
 * Compute the level-0 (normal operation) plane and cursor watermarks
 * for the given hardware plane.
 *
 * Returns true when the plane's CRTC is active with a framebuffer and
 * real values were computed; otherwise stores the guard-size defaults
 * in *plane_wm / *cursor_wm and returns false.  Latencies are given
 * in nanoseconds.
 */
static bool g4x_compute_wm0(struct drm_device *dev,
			    int plane,
			    const struct intel_watermark_params *display,
			    int display_latency_ns,
			    const struct intel_watermark_params *cursor,
			    int cursor_latency_ns,
			    int *plane_wm,
			    int *cursor_wm)
{
	struct drm_crtc *crtc;
	int htotal, hdisplay, clock, pixel_size;
	int line_time_us, line_count;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (crtc->fb == NULL || !crtc->enabled) {
		/* Inactive pipe: fall back to the minimal guard values. */
		*cursor_wm = cursor->guard_size;
		*plane_wm = display->guard_size;
		return false;
	}

	htotal = crtc->mode.htotal;
	hdisplay = crtc->mode.hdisplay;
	clock = crtc->mode.clock;
	pixel_size = crtc->fb->bits_per_pixel / 8;

	/* Use the small buffer method to calculate plane watermark */
	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
	/* Extra entries to cover a potential TLB miss during the fetch. */
	tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = howmany(entries, display->cacheline_size);
	*plane_wm = entries + display->guard_size;
	if (*plane_wm > (int)display->max_wm)
		*plane_wm = display->max_wm;

	/* Use the large buffer method to calculate cursor watermark */
	line_time_us = ((htotal * 1000) / clock);
	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
	entries = line_count * 64 * pixel_size;	/* 64-wide cursor assumed */
	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = howmany(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;
	if (*cursor_wm > (int)cursor->max_wm)
		*cursor_wm = (int)cursor->max_wm;

	return true;
}
4124235783Skib
4125235783Skib/*
4126235783Skib * Check the wm result.
4127235783Skib *
4128235783Skib * If any calculated watermark values is larger than the maximum value that
4129235783Skib * can be programmed into the associated watermark register, that watermark
4130235783Skib * must be disabled.
4131235783Skib */
4132235783Skibstatic bool g4x_check_srwm(struct drm_device *dev,
4133235783Skib			   int display_wm, int cursor_wm,
4134235783Skib			   const struct intel_watermark_params *display,
4135235783Skib			   const struct intel_watermark_params *cursor)
4136235783Skib{
4137235783Skib	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
4138235783Skib		      display_wm, cursor_wm);
4139235783Skib
4140235783Skib	if (display_wm > display->max_wm) {
4141235783Skib		DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
4142235783Skib			      display_wm, display->max_wm);
4143235783Skib		return false;
4144235783Skib	}
4145235783Skib
4146235783Skib	if (cursor_wm > cursor->max_wm) {
4147235783Skib		DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
4148235783Skib			      cursor_wm, cursor->max_wm);
4149235783Skib		return false;
4150235783Skib	}
4151235783Skib
4152235783Skib	if (!(display_wm || cursor_wm)) {
4153235783Skib		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
4154235783Skib		return false;
4155235783Skib	}
4156235783Skib
4157235783Skib	return true;
4158235783Skib}
4159235783Skib
/*
 * Compute the self-refresh watermarks (display and cursor) for the
 * given plane's CRTC.
 *
 * A zero latency means self-refresh cannot be used: both outputs are
 * cleared and false is returned.  Otherwise the results are validated
 * with g4x_check_srwm() and its verdict is returned.
 */
static bool g4x_compute_srwm(struct drm_device *dev,
			     int plane,
			     int latency_ns,
			     const struct intel_watermark_params *display,
			     const struct intel_watermark_params *cursor,
			     int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	int hdisplay, htotal, pixel_size, clock;
	unsigned long line_time_us;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	hdisplay = crtc->mode.hdisplay;
	htotal = crtc->mode.htotal;
	clock = crtc->mode.clock;
	pixel_size = crtc->fb->bits_per_pixel / 8;

	line_time_us = (htotal * 1000) / clock;
	/* Number of scanlines covered by the latency window. */
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = howmany(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * pixel_size * 64;	/* 64-wide cursor assumed */
	entries = howmany(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return g4x_check_srwm(dev,
			      *display_wm, *cursor_wm,
			      display, cursor);
}
4205235783Skib
/*
 * True when exactly one bit is set in mask, i.e. exactly one plane is
 * enabled (the != 0 guard excludes an empty mask).
 */
#define single_plane_enabled(mask) ((mask) != 0 && powerof2(mask))
4207235783Skib
/*
 * Program the G4x FIFO watermarks.
 *
 * Computes level-0 watermarks for planes A and B; when exactly one
 * plane is enabled, additionally computes self-refresh watermarks and
 * enables self-refresh, otherwise disables it.  Results are written
 * to DSPFW1/DSPFW2/DSPFW3.
 */
static void g4x_update_wm(struct drm_device *dev)
{
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;	/* bitmask of active planes */

	if (g4x_compute_wm0(dev, 0,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1;

	if (g4x_compute_wm0(dev, 1,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 2;

	plane_sr = cursor_sr = 0;
	/* Self-refresh is only usable with a single enabled plane. */
	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr))
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	else
		I915_WRITE(FW_BLC_SELF,
			   I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   planea_wm);
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
4258235783Skib
/*
 * Program the 965-class FIFO watermarks.
 *
 * Level-0 watermarks are fixed at 8 for all planes/cursors; only the
 * self-refresh watermarks are computed, and only when exactly one
 * CRTC is active.  On Crestline, FW_BLC_SELF self-refresh is toggled
 * to match.
 */
static void i965_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	int srwm = 1;		/* fallback SR watermark */
	int cursor_sr = 16;	/* fallback cursor SR watermark */

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		int clock = crtc->mode.clock;
		int htotal = crtc->mode.htotal;
		int hdisplay = crtc->mode.hdisplay;
		int pixel_size = crtc->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = ((htotal * 1000) / clock);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = howmany(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		/* Same calculation for the (64-wide) cursor. */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * 64;
		entries = howmany(entries, i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);

		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		if (IS_CRESTLINE(dev))
			I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	} else {
		/* Turn off self refresh if both pipes are enabled */
		if (IS_CRESTLINE(dev))
			I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
				   & ~FW_BLC_SELF_EN);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
		   (8 << 16) | (8 << 8) | (8 << 0));
	I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
4322235783Skib
/*
 * Program the pre-965 (8xx/9xx) FIFO watermarks via FW_BLC/FW_BLC2.
 *
 * Computes plane A/B watermarks from the current modes, then — on
 * hardware with FW_BLC self-refresh and exactly one active CRTC —
 * computes and enables the self-refresh watermark as well.
 * Self-refresh is disabled up front and only re-enabled at the end,
 * after the watermarks are in place.
 */
static void i9xx_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	/*
	 * enabled tracks "exactly one active CRTC": it holds that CRTC,
	 * or NULL when zero or both pipes are active (see below).
	 */
	struct drm_crtc *crtc, *enabled = NULL;

	if (IS_I945GM(dev))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev))
		wm_info = &i915_wm_info;
	else
		wm_info = &i855_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
	crtc = intel_get_crtc_for_plane(dev, 0);
	if (crtc->enabled && crtc->fb) {
		planea_wm = intel_calculate_wm(crtc->mode.clock,
					       wm_info, fifo_size,
					       crtc->fb->bits_per_pixel / 8,
					       latency_ns);
		enabled = crtc;
	} else
		planea_wm = fifo_size - wm_info->guard_size;

	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
	crtc = intel_get_crtc_for_plane(dev, 1);
	if (crtc->enabled && crtc->fb) {
		planeb_wm = intel_calculate_wm(crtc->mode.clock,
					       wm_info, fifo_size,
					       crtc->fb->bits_per_pixel / 8,
					       latency_ns);
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;	/* both pipes active: no SR */
	} else
		planeb_wm = fifo_size - wm_info->guard_size;

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	if (IS_I945G(dev) || IS_I945GM(dev))
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
	else if (IS_I915GM(dev))
		I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		int clock = enabled->mode.clock;
		int htotal = enabled->mode.htotal;
		int hdisplay = enabled->mode.hdisplay;
		int pixel_size = enabled->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = (htotal * 1000) / clock;

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = howmany(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev) || IS_I945GM(dev))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else if (IS_I915GM(dev))
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	/* Re-enable self-refresh now that the watermarks are programmed. */
	if (HAS_FW_BLC(dev)) {
		if (enabled) {
			if (IS_I945G(dev) || IS_I945GM(dev))
				I915_WRITE(FW_BLC_SELF,
					   FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
			else if (IS_I915GM(dev))
				I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
			DRM_DEBUG_KMS("memory self refresh enabled\n");
		} else
			DRM_DEBUG_KMS("memory self refresh disabled\n");
	}
}
4433235783Skib
4434235783Skibstatic void i830_update_wm(struct drm_device *dev)
4435235783Skib{
4436235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
4437235783Skib	struct drm_crtc *crtc;
4438235783Skib	uint32_t fwater_lo;
4439235783Skib	int planea_wm;
4440235783Skib
4441235783Skib	crtc = single_enabled_crtc(dev);
4442235783Skib	if (crtc == NULL)
4443235783Skib		return;
4444235783Skib
4445235783Skib	planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
4446235783Skib				       dev_priv->display.get_fifo_size(dev, 0),
4447235783Skib				       crtc->fb->bits_per_pixel / 8,
4448235783Skib				       latency_ns);
4449235783Skib	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
4450235783Skib	fwater_lo |= (3<<8) | planea_wm;
4451235783Skib
4452235783Skib	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
4453235783Skib
4454235783Skib	I915_WRITE(FW_BLC, fwater_lo);
4455235783Skib}
4456235783Skib
/*
 * Ironlake LP0 watermark latencies; passed as the *_latency_ns
 * arguments to g4x_compute_wm0(), so they are nanosecond values.
 */
#define ILK_LP0_PLANE_LATENCY		700
#define ILK_LP0_CURSOR_LATENCY		1300
4459235783Skib
/*
 * Check the wm result.
 *
 * If any calculated watermark values is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 *
 * Returns false when the given WM level (and all higher levels) must
 * stay disabled.  An oversized FBC watermark additionally disables
 * FBC watermarking in DISP_ARB_CTL before returning.
 */
static bool ironlake_check_srwm(struct drm_device *dev, int level,
				int fbc_wm, int display_wm, int cursor_wm,
				const struct intel_watermark_params *display,
				const struct intel_watermark_params *cursor)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
		      " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);

	if (fbc_wm > SNB_FBC_MAX_SRWM) {
		DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
			      fbc_wm, SNB_FBC_MAX_SRWM, level);

		/* fbc has it's own way to disable FBC WM */
		I915_WRITE(DISP_ARB_CTL,
			   I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
		return false;
	}

	/*
	 * NOTE(review): the two messages below report the SNB_* limits,
	 * while the comparisons use display->max_wm / cursor->max_wm —
	 * the logged maximum may differ from the one actually checked.
	 */
	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
			      display_wm, SNB_DISPLAY_MAX_SRWM, level);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
			      cursor_wm, SNB_CURSOR_MAX_SRWM, level);
		return false;
	}

	if (!(fbc_wm || display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
		return false;
	}

	return true;
}
4506235783Skib
/*
 * Compute the WM[1-3] self-refresh watermarks (FBC, display, cursor)
 * for the given plane's CRTC at the given latency.
 *
 * A zero latency clears all three outputs and returns false; otherwise
 * the results are validated via ironlake_check_srwm() and its verdict
 * is returned.
 */
static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
				  int latency_ns,
				  const struct intel_watermark_params *display,
				  const struct intel_watermark_params *cursor,
				  int *fbc_wm, int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	unsigned long line_time_us;
	int hdisplay, htotal, pixel_size, clock;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*fbc_wm = *display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	hdisplay = crtc->mode.hdisplay;
	htotal = crtc->mode.htotal;
	clock = crtc->mode.clock;
	pixel_size = crtc->fb->bits_per_pixel / 8;

	line_time_us = (htotal * 1000) / clock;
	/* Number of scanlines covered by the latency window. */
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = howmany(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/*
	 * Spec says:
	 * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
	 */
	*fbc_wm = howmany(*display_wm * 64, line_size) + 2;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * pixel_size * 64;	/* 64-wide cursor assumed */
	entries = howmany(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return ironlake_check_srwm(dev, level,
				   *fbc_wm, *display_wm, *cursor_wm,
				   display, cursor);
}
4560235783Skib
/*
 * Program the Ironlake FIFO watermarks.
 *
 * Writes the level-0 watermarks for pipes A and B, then — only when a
 * single plane is enabled — computes and programs WM1 and WM2.  The
 * LP registers are cleared first so partial results never take
 * effect, and programming stops at the first level that fails
 * validation (higher levels stay disabled).
 */
static void ironlake_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int fbc_wm, plane_wm, cursor_wm;
	unsigned int enabled;	/* bitmask of active pipes */

	enabled = 0;
	if (g4x_compute_wm0(dev, 0,
			    &ironlake_display_wm_info,
			    ILK_LP0_PLANE_LATENCY,
			    &ironlake_cursor_wm_info,
			    ILK_LP0_CURSOR_LATENCY,
			    &plane_wm, &cursor_wm)) {
		I915_WRITE(WM0_PIPEA_ILK,
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
			      " plane %d, " "cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 1;
	}

	if (g4x_compute_wm0(dev, 1,
			    &ironlake_display_wm_info,
			    ILK_LP0_PLANE_LATENCY,
			    &ironlake_cursor_wm_info,
			    ILK_LP0_CURSOR_LATENCY,
			    &plane_wm, &cursor_wm)) {
		I915_WRITE(WM0_PIPEB_ILK,
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
			      " plane %d, cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 2;
	}

	/*
	 * Calculate and update the self-refresh watermark only when one
	 * display plane is used.
	 */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	if (!single_plane_enabled(enabled))
		return;
	enabled = ffs(enabled) - 1;	/* bitmask -> plane index */

	/* WM1 */
	if (!ironlake_compute_srwm(dev, 1, enabled,
				   ILK_READ_WM1_LATENCY() * 500,
				   &ironlake_display_srwm_info,
				   &ironlake_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM1_LP_ILK,
		   WM1_LP_SR_EN |
		   (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/* WM2 */
	if (!ironlake_compute_srwm(dev, 2, enabled,
				   ILK_READ_WM2_LATENCY() * 500,
				   &ironlake_display_srwm_info,
				   &ironlake_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM2_LP_ILK,
		   WM2_LP_EN |
		   (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/*
	 * WM3 is unsupported on ILK, probably because we don't have latency
	 * data for that power state
	 */
}
4643235783Skib
4644235783Skibvoid sandybridge_update_wm(struct drm_device *dev)
4645235783Skib{
4646235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
4647235783Skib	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In unit 0.1us */
4648235783Skib	u32 val;
4649235783Skib	int fbc_wm, plane_wm, cursor_wm;
4650235783Skib	unsigned int enabled;
4651235783Skib
4652235783Skib	enabled = 0;
4653235783Skib	if (g4x_compute_wm0(dev, 0,
4654235783Skib			    &sandybridge_display_wm_info, latency,
4655235783Skib			    &sandybridge_cursor_wm_info, latency,
4656235783Skib			    &plane_wm, &cursor_wm)) {
4657235783Skib		val = I915_READ(WM0_PIPEA_ILK);
4658235783Skib		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
4659235783Skib		I915_WRITE(WM0_PIPEA_ILK, val |
4660235783Skib			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
4661235783Skib		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
4662235783Skib			      " plane %d, " "cursor: %d\n",
4663235783Skib			      plane_wm, cursor_wm);
4664235783Skib		enabled |= 1;
4665235783Skib	}
4666235783Skib
4667235783Skib	if (g4x_compute_wm0(dev, 1,
4668235783Skib			    &sandybridge_display_wm_info, latency,
4669235783Skib			    &sandybridge_cursor_wm_info, latency,
4670235783Skib			    &plane_wm, &cursor_wm)) {
4671235783Skib		val = I915_READ(WM0_PIPEB_ILK);
4672235783Skib		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
4673235783Skib		I915_WRITE(WM0_PIPEB_ILK, val |
4674235783Skib			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
4675235783Skib		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
4676235783Skib			      " plane %d, cursor: %d\n",
4677235783Skib			      plane_wm, cursor_wm);
4678235783Skib		enabled |= 2;
4679235783Skib	}
4680235783Skib
4681235783Skib	/* IVB has 3 pipes */
4682235783Skib	if (IS_IVYBRIDGE(dev) &&
4683235783Skib	    g4x_compute_wm0(dev, 2,
4684235783Skib			    &sandybridge_display_wm_info, latency,
4685235783Skib			    &sandybridge_cursor_wm_info, latency,
4686235783Skib			    &plane_wm, &cursor_wm)) {
4687235783Skib		val = I915_READ(WM0_PIPEC_IVB);
4688235783Skib		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
4689235783Skib		I915_WRITE(WM0_PIPEC_IVB, val |
4690235783Skib			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
4691235783Skib		DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
4692235783Skib			      " plane %d, cursor: %d\n",
4693235783Skib			      plane_wm, cursor_wm);
4694235783Skib		enabled |= 3;
4695235783Skib	}
4696235783Skib
4697235783Skib	/*
4698235783Skib	 * Calculate and update the self-refresh watermark only when one
4699235783Skib	 * display plane is used.
4700235783Skib	 *
4701235783Skib	 * SNB support 3 levels of watermark.
4702235783Skib	 *
4703235783Skib	 * WM1/WM2/WM2 watermarks have to be enabled in the ascending order,
4704235783Skib	 * and disabled in the descending order
4705235783Skib	 *
4706235783Skib	 */
4707235783Skib	I915_WRITE(WM3_LP_ILK, 0);
4708235783Skib	I915_WRITE(WM2_LP_ILK, 0);
4709235783Skib	I915_WRITE(WM1_LP_ILK, 0);
4710235783Skib
4711235783Skib	if (!single_plane_enabled(enabled) ||
4712235783Skib	    dev_priv->sprite_scaling_enabled)
4713235783Skib		return;
4714235783Skib	enabled = ffs(enabled) - 1;
4715235783Skib
4716235783Skib	/* WM1 */
4717235783Skib	if (!ironlake_compute_srwm(dev, 1, enabled,
4718235783Skib				   SNB_READ_WM1_LATENCY() * 500,
4719235783Skib				   &sandybridge_display_srwm_info,
4720235783Skib				   &sandybridge_cursor_srwm_info,
4721235783Skib				   &fbc_wm, &plane_wm, &cursor_wm))
4722235783Skib		return;
4723235783Skib
4724235783Skib	I915_WRITE(WM1_LP_ILK,
4725235783Skib		   WM1_LP_SR_EN |
4726235783Skib		   (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4727235783Skib		   (fbc_wm << WM1_LP_FBC_SHIFT) |
4728235783Skib		   (plane_wm << WM1_LP_SR_SHIFT) |
4729235783Skib		   cursor_wm);
4730235783Skib
4731235783Skib	/* WM2 */
4732235783Skib	if (!ironlake_compute_srwm(dev, 2, enabled,
4733235783Skib				   SNB_READ_WM2_LATENCY() * 500,
4734235783Skib				   &sandybridge_display_srwm_info,
4735235783Skib				   &sandybridge_cursor_srwm_info,
4736235783Skib				   &fbc_wm, &plane_wm, &cursor_wm))
4737235783Skib		return;
4738235783Skib
4739235783Skib	I915_WRITE(WM2_LP_ILK,
4740235783Skib		   WM2_LP_EN |
4741235783Skib		   (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4742235783Skib		   (fbc_wm << WM1_LP_FBC_SHIFT) |
4743235783Skib		   (plane_wm << WM1_LP_SR_SHIFT) |
4744235783Skib		   cursor_wm);
4745235783Skib
4746235783Skib	/* WM3 */
4747235783Skib	if (!ironlake_compute_srwm(dev, 3, enabled,
4748235783Skib				   SNB_READ_WM3_LATENCY() * 500,
4749235783Skib				   &sandybridge_display_srwm_info,
4750235783Skib				   &sandybridge_cursor_srwm_info,
4751235783Skib				   &fbc_wm, &plane_wm, &cursor_wm))
4752235783Skib		return;
4753235783Skib
4754235783Skib	I915_WRITE(WM3_LP_ILK,
4755235783Skib		   WM3_LP_EN |
4756235783Skib		   (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4757235783Skib		   (fbc_wm << WM1_LP_FBC_SHIFT) |
4758235783Skib		   (plane_wm << WM1_LP_SR_SHIFT) |
4759235783Skib		   cursor_wm);
4760235783Skib}
4761235783Skib
4762235783Skibstatic bool
4763235783Skibsandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
4764235783Skib			      uint32_t sprite_width, int pixel_size,
4765235783Skib			      const struct intel_watermark_params *display,
4766235783Skib			      int display_latency_ns, int *sprite_wm)
4767235783Skib{
4768235783Skib	struct drm_crtc *crtc;
4769235783Skib	int clock;
4770235783Skib	int entries, tlb_miss;
4771235783Skib
4772235783Skib	crtc = intel_get_crtc_for_plane(dev, plane);
4773235783Skib	if (crtc->fb == NULL || !crtc->enabled) {
4774235783Skib		*sprite_wm = display->guard_size;
4775235783Skib		return false;
4776235783Skib	}
4777235783Skib
4778235783Skib	clock = crtc->mode.clock;
4779235783Skib
4780235783Skib	/* Use the small buffer method to calculate the sprite watermark */
4781235783Skib	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
4782235783Skib	tlb_miss = display->fifo_size*display->cacheline_size -
4783235783Skib		sprite_width * 8;
4784235783Skib	if (tlb_miss > 0)
4785235783Skib		entries += tlb_miss;
4786235783Skib	entries = howmany(entries, display->cacheline_size);
4787235783Skib	*sprite_wm = entries + display->guard_size;
4788235783Skib	if (*sprite_wm > (int)display->max_wm)
4789235783Skib		*sprite_wm = display->max_wm;
4790235783Skib
4791235783Skib	return true;
4792235783Skib}
4793235783Skib
4794235783Skibstatic bool
4795235783Skibsandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
4796235783Skib				uint32_t sprite_width, int pixel_size,
4797235783Skib				const struct intel_watermark_params *display,
4798235783Skib				int latency_ns, int *sprite_wm)
4799235783Skib{
4800235783Skib	struct drm_crtc *crtc;
4801235783Skib	unsigned long line_time_us;
4802235783Skib	int clock;
4803235783Skib	int line_count, line_size;
4804235783Skib	int small, large;
4805235783Skib	int entries;
4806235783Skib
4807235783Skib	if (!latency_ns) {
4808235783Skib		*sprite_wm = 0;
4809235783Skib		return false;
4810235783Skib	}
4811235783Skib
4812235783Skib	crtc = intel_get_crtc_for_plane(dev, plane);
4813235783Skib	clock = crtc->mode.clock;
4814235783Skib	if (!clock) {
4815235783Skib		*sprite_wm = 0;
4816235783Skib		return false;
4817235783Skib	}
4818235783Skib
4819235783Skib	line_time_us = (sprite_width * 1000) / clock;
4820235783Skib	if (!line_time_us) {
4821235783Skib		*sprite_wm = 0;
4822235783Skib		return false;
4823235783Skib	}
4824235783Skib
4825235783Skib	line_count = (latency_ns / line_time_us + 1000) / 1000;
4826235783Skib	line_size = sprite_width * pixel_size;
4827235783Skib
4828235783Skib	/* Use the minimum of the small and large buffer method for primary */
4829235783Skib	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
4830235783Skib	large = line_count * line_size;
4831235783Skib
4832235783Skib	entries = howmany(min(small, large), display->cacheline_size);
4833235783Skib	*sprite_wm = entries + display->guard_size;
4834235783Skib
4835235783Skib	return *sprite_wm > 0x3ff ? false : true;
4836235783Skib}
4837235783Skib
4838235783Skibstatic void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
4839235783Skib					 uint32_t sprite_width, int pixel_size)
4840235783Skib{
4841235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
4842235783Skib	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In unit 0.1us */
4843235783Skib	u32 val;
4844235783Skib	int sprite_wm, reg;
4845235783Skib	int ret;
4846235783Skib
4847235783Skib	switch (pipe) {
4848235783Skib	case 0:
4849235783Skib		reg = WM0_PIPEA_ILK;
4850235783Skib		break;
4851235783Skib	case 1:
4852235783Skib		reg = WM0_PIPEB_ILK;
4853235783Skib		break;
4854235783Skib	case 2:
4855235783Skib		reg = WM0_PIPEC_IVB;
4856235783Skib		break;
4857235783Skib	default:
4858235783Skib		return; /* bad pipe */
4859235783Skib	}
4860235783Skib
4861235783Skib	ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
4862235783Skib					    &sandybridge_display_wm_info,
4863235783Skib					    latency, &sprite_wm);
4864235783Skib	if (!ret) {
4865235783Skib		DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n",
4866235783Skib			      pipe);
4867235783Skib		return;
4868235783Skib	}
4869235783Skib
4870235783Skib	val = I915_READ(reg);
4871235783Skib	val &= ~WM0_PIPE_SPRITE_MASK;
4872235783Skib	I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
4873235783Skib	DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm);
4874235783Skib
4875235783Skib
4876235783Skib	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
4877235783Skib					      pixel_size,
4878235783Skib					      &sandybridge_display_srwm_info,
4879235783Skib					      SNB_READ_WM1_LATENCY() * 500,
4880235783Skib					      &sprite_wm);
4881235783Skib	if (!ret) {
4882235783Skib		DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n",
4883235783Skib			      pipe);
4884235783Skib		return;
4885235783Skib	}
4886235783Skib	I915_WRITE(WM1S_LP_ILK, sprite_wm);
4887235783Skib
4888235783Skib	/* Only IVB has two more LP watermarks for sprite */
4889235783Skib	if (!IS_IVYBRIDGE(dev))
4890235783Skib		return;
4891235783Skib
4892235783Skib	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
4893235783Skib					      pixel_size,
4894235783Skib					      &sandybridge_display_srwm_info,
4895235783Skib					      SNB_READ_WM2_LATENCY() * 500,
4896235783Skib					      &sprite_wm);
4897235783Skib	if (!ret) {
4898235783Skib		DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n",
4899235783Skib			      pipe);
4900235783Skib		return;
4901235783Skib	}
4902235783Skib	I915_WRITE(WM2S_LP_IVB, sprite_wm);
4903235783Skib
4904235783Skib	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
4905235783Skib					      pixel_size,
4906235783Skib					      &sandybridge_display_srwm_info,
4907235783Skib					      SNB_READ_WM3_LATENCY() * 500,
4908235783Skib					      &sprite_wm);
4909235783Skib	if (!ret) {
4910235783Skib		DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n",
4911235783Skib			      pipe);
4912235783Skib		return;
4913235783Skib	}
4914235783Skib	I915_WRITE(WM3S_LP_IVB, sprite_wm);
4915235783Skib}
4916235783Skib
4917235783Skib/**
4918235783Skib * intel_update_watermarks - update FIFO watermark values based on current modes
4919235783Skib *
4920235783Skib * Calculate watermark values for the various WM regs based on current mode
4921235783Skib * and plane configuration.
4922235783Skib *
4923235783Skib * There are several cases to deal with here:
4924235783Skib *   - normal (i.e. non-self-refresh)
4925235783Skib *   - self-refresh (SR) mode
4926235783Skib *   - lines are large relative to FIFO size (buffer can hold up to 2)
4927235783Skib *   - lines are small relative to FIFO size (buffer can hold more than 2
4928235783Skib *     lines), so need to account for TLB latency
4929235783Skib *
4930235783Skib *   The normal calculation is:
4931235783Skib *     watermark = dotclock * bytes per pixel * latency
4932235783Skib *   where latency is platform & configuration dependent (we assume pessimal
4933235783Skib *   values here).
4934235783Skib *
4935235783Skib *   The SR calculation is:
4936235783Skib *     watermark = (trunc(latency/line time)+1) * surface width *
4937235783Skib *       bytes per pixel
4938235783Skib *   where
4939235783Skib *     line time = htotal / dotclock
4940235783Skib *     surface width = hdisplay for normal plane and 64 for cursor
4941235783Skib *   and latency is assumed to be high, as above.
4942235783Skib *
4943235783Skib * The final value programmed to the register should always be rounded up,
4944235783Skib * and include an extra 2 entries to account for clock crossings.
4945235783Skib *
4946235783Skib * We don't use the sprite, so we can ignore that.  And on Crestline we have
4947235783Skib * to set the non-SR watermarks to 8.
4948235783Skib */
4949235783Skibstatic void intel_update_watermarks(struct drm_device *dev)
4950235783Skib{
4951235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
4952235783Skib
4953235783Skib	if (dev_priv->display.update_wm)
4954235783Skib		dev_priv->display.update_wm(dev);
4955235783Skib}
4956235783Skib
4957235783Skibvoid intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
4958235783Skib				    uint32_t sprite_width, int pixel_size)
4959235783Skib{
4960235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
4961235783Skib
4962235783Skib	if (dev_priv->display.update_sprite_wm)
4963235783Skib		dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
4964235783Skib						   pixel_size);
4965235783Skib}
4966235783Skib
4967235783Skibstatic inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
4968235783Skib{
4969235783Skib	if (i915_panel_use_ssc >= 0)
4970235783Skib		return i915_panel_use_ssc != 0;
4971235783Skib	return dev_priv->lvds_use_ssc
4972235783Skib		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
4973235783Skib}
4974235783Skib
/**
 * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
 * @crtc: CRTC structure
 * @pipe_bpp: out parameter; receives the chosen pipe depth in bits per pixel
 *            (3 * bpc)
 * @mode: requested mode
 *
 * A pipe may be connected to one or more outputs.  Based on the depth of the
 * attached framebuffer, choose a good color depth to use on the pipe.
 *
 * If possible, match the pipe depth to the fb depth.  In some cases, this
 * isn't ideal, because the connected output supports a lesser or restricted
 * set of depths.  Resolve that here:
 *    LVDS typically supports only 6bpc, so clamp down in that case
 *    HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
 *    Displays may support a restricted set as well, check EDID and clamp as
 *      appropriate.
 *    DP may want to dither down to 6bpc to fit larger modes
 *
 * RETURNS:
 * Dithering requirement (i.e. false if display bpc and pipe bpc match,
 * true if they don't match).
 */
static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
					 unsigned int *pipe_bpp,
					 struct drm_display_mode *mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	/* display_bpc starts at "no limit" and is clamped down as the
	 * encoders/connectors on this crtc are examined. */
	unsigned int display_bpc = UINT_MAX, bpc;

	/* Walk the encoders & connectors on this crtc, get min bpc */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

		if (encoder->crtc != crtc)
			continue;

		if (intel_encoder->type == INTEL_OUTPUT_LVDS) {
			unsigned int lvds_bpc;

			/* The A3 power pair being up indicates a 24bpp
			 * (8bpc) panel; otherwise assume an 18bpp (6bpc)
			 * panel. */
			if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) ==
			    LVDS_A3_POWER_UP)
				lvds_bpc = 8;
			else
				lvds_bpc = 6;

			if (lvds_bpc < display_bpc) {
				DRM_DEBUG_KMS("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
				display_bpc = lvds_bpc;
			}
			continue;
		}

		if (intel_encoder->type == INTEL_OUTPUT_EDP) {
			/* Use VBT settings if we have an eDP panel */
			unsigned int edp_bpc = dev_priv->edp.bpp / 3;

			if (edp_bpc < display_bpc) {
				DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
				display_bpc = edp_bpc;
			}
			continue;
		}

		/* Not one of the known troublemakers, check the EDID */
		list_for_each_entry(connector, &dev->mode_config.connector_list,
				    head) {
			if (connector->encoder != encoder)
				continue;

			/* Don't use an invalid EDID bpc value */
			if (connector->display_info.bpc &&
			    connector->display_info.bpc < display_bpc) {
				DRM_DEBUG_KMS("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
				display_bpc = connector->display_info.bpc;
			}
		}

		/*
		 * HDMI is either 12 or 8, so if the display lets 10bpc sneak
		 * through, clamp it down.  (Note: >12bpc will be caught below.)
		 */
		if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
			if (display_bpc > 8 && display_bpc < 12) {
				DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n");
				display_bpc = 12;
			} else {
				DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n");
				display_bpc = 8;
			}
		}
	}

	/* DP link-bandwidth code may request 6bpc via mode private flags. */
	if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
		DRM_DEBUG_KMS("Dithering DP to 6bpc\n");
		display_bpc = 6;
	}

	/*
	 * We could just drive the pipe at the highest bpc all the time and
	 * enable dithering as needed, but that costs bandwidth.  So choose
	 * the minimum value that expresses the full color range of the fb but
	 * also stays within the max display bpc discovered above.
	 */

	switch (crtc->fb->depth) {
	case 8:
		bpc = 8; /* since we go through a colormap */
		break;
	case 15:
	case 16:
		bpc = 6; /* min is 18bpp */
		break;
	case 24:
		bpc = 8;
		break;
	case 30:
		bpc = 10;
		break;
	case 48:
		bpc = 12;
		break;
	default:
		DRM_DEBUG("unsupported depth, assuming 24 bits\n");
		bpc = min((unsigned int)8, display_bpc);
		break;
	}

	/* Final pipe depth: fb-derived bpc clamped by the display limit. */
	display_bpc = min(display_bpc, bpc);

	DRM_DEBUG_KMS("setting pipe bpc to %d (max display bpc %d)\n",
			 bpc, display_bpc);

	*pipe_bpp = display_bpc * 3;

	/* Dither only when the pipe depth differs from the fb-derived bpc. */
	return display_bpc != bpc;
}
5113235783Skib
5114235783Skibstatic int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
5115235783Skib{
5116235783Skib	struct drm_device *dev = crtc->dev;
5117235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
5118235783Skib	int refclk;
5119235783Skib
5120235783Skib	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
5121235783Skib	    intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
5122235783Skib		refclk = dev_priv->lvds_ssc_freq * 1000;
5123235783Skib		DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
5124235783Skib			      refclk / 1000);
5125235783Skib	} else if (!IS_GEN2(dev)) {
5126235783Skib		refclk = 96000;
5127235783Skib	} else {
5128235783Skib		refclk = 48000;
5129235783Skib	}
5130235783Skib
5131235783Skib	return refclk;
5132235783Skib}
5133235783Skib
5134235783Skibstatic void i9xx_adjust_sdvo_tv_clock(struct drm_display_mode *adjusted_mode,
5135235783Skib				      intel_clock_t *clock)
5136235783Skib{
5137235783Skib	/* SDVO TV has fixed PLL values depend on its clock range,
5138235783Skib	   this mirrors vbios setting. */
5139235783Skib	if (adjusted_mode->clock >= 100000
5140235783Skib	    && adjusted_mode->clock < 140500) {
5141235783Skib		clock->p1 = 2;
5142235783Skib		clock->p2 = 10;
5143235783Skib		clock->n = 3;
5144235783Skib		clock->m1 = 16;
5145235783Skib		clock->m2 = 8;
5146235783Skib	} else if (adjusted_mode->clock >= 140500
5147235783Skib		   && adjusted_mode->clock <= 200000) {
5148235783Skib		clock->p1 = 1;
5149235783Skib		clock->p2 = 10;
5150235783Skib		clock->n = 6;
5151235783Skib		clock->m1 = 12;
5152235783Skib		clock->m2 = 8;
5153235783Skib	}
5154235783Skib}
5155235783Skib
5156235783Skibstatic void i9xx_update_pll_dividers(struct drm_crtc *crtc,
5157235783Skib				     intel_clock_t *clock,
5158235783Skib				     intel_clock_t *reduced_clock)
5159235783Skib{
5160235783Skib	struct drm_device *dev = crtc->dev;
5161235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
5162235783Skib	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5163235783Skib	int pipe = intel_crtc->pipe;
5164235783Skib	u32 fp, fp2 = 0;
5165235783Skib
5166235783Skib	if (IS_PINEVIEW(dev)) {
5167235783Skib		fp = (1 << clock->n) << 16 | clock->m1 << 8 | clock->m2;
5168235783Skib		if (reduced_clock)
5169235783Skib			fp2 = (1 << reduced_clock->n) << 16 |
5170235783Skib				reduced_clock->m1 << 8 | reduced_clock->m2;
5171235783Skib	} else {
5172235783Skib		fp = clock->n << 16 | clock->m1 << 8 | clock->m2;
5173235783Skib		if (reduced_clock)
5174235783Skib			fp2 = reduced_clock->n << 16 | reduced_clock->m1 << 8 |
5175235783Skib				reduced_clock->m2;
5176235783Skib	}
5177235783Skib
5178235783Skib	I915_WRITE(FP0(pipe), fp);
5179235783Skib
5180235783Skib	intel_crtc->lowfreq_avail = false;
5181235783Skib	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
5182235783Skib	    reduced_clock && i915_powersave) {
5183235783Skib		I915_WRITE(FP1(pipe), fp2);
5184235783Skib		intel_crtc->lowfreq_avail = true;
5185235783Skib	} else {
5186235783Skib		I915_WRITE(FP1(pipe), fp);
5187235783Skib	}
5188235783Skib}
5189235783Skib
5190235783Skibstatic int i9xx_crtc_mode_set(struct drm_crtc *crtc,
5191235783Skib			      struct drm_display_mode *mode,
5192235783Skib			      struct drm_display_mode *adjusted_mode,
5193235783Skib			      int x, int y,
5194235783Skib			      struct drm_framebuffer *old_fb)
5195235783Skib{
5196235783Skib	struct drm_device *dev = crtc->dev;
5197235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
5198235783Skib	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5199235783Skib	int pipe = intel_crtc->pipe;
5200235783Skib	int plane = intel_crtc->plane;
5201235783Skib	int refclk, num_connectors = 0;
5202235783Skib	intel_clock_t clock, reduced_clock;
5203235783Skib	u32 dpll, dspcntr, pipeconf, vsyncshift;
5204235783Skib	bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
5205235783Skib	bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
5206235783Skib	struct drm_mode_config *mode_config = &dev->mode_config;
5207235783Skib	struct intel_encoder *encoder;
5208235783Skib	const intel_limit_t *limit;
5209235783Skib	int ret;
5210235783Skib	u32 temp;
5211235783Skib	u32 lvds_sync = 0;
5212235783Skib
5213235783Skib	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
5214235783Skib		if (encoder->base.crtc != crtc)
5215235783Skib			continue;
5216235783Skib
5217235783Skib		switch (encoder->type) {
5218235783Skib		case INTEL_OUTPUT_LVDS:
5219235783Skib			is_lvds = true;
5220235783Skib			break;
5221235783Skib		case INTEL_OUTPUT_SDVO:
5222235783Skib		case INTEL_OUTPUT_HDMI:
5223235783Skib			is_sdvo = true;
5224235783Skib			if (encoder->needs_tv_clock)
5225235783Skib				is_tv = true;
5226235783Skib			break;
5227235783Skib		case INTEL_OUTPUT_DVO:
5228235783Skib			is_dvo = true;
5229235783Skib			break;
5230235783Skib		case INTEL_OUTPUT_TVOUT:
5231235783Skib			is_tv = true;
5232235783Skib			break;
5233235783Skib		case INTEL_OUTPUT_ANALOG:
5234235783Skib			is_crt = true;
5235235783Skib			break;
5236235783Skib		case INTEL_OUTPUT_DISPLAYPORT:
5237235783Skib			is_dp = true;
5238235783Skib			break;
5239235783Skib		}
5240235783Skib
5241235783Skib		num_connectors++;
5242235783Skib	}
5243235783Skib
5244235783Skib	refclk = i9xx_get_refclk(crtc, num_connectors);
5245235783Skib
5246235783Skib	/*
5247235783Skib	 * Returns a set of divisors for the desired target clock with the given
5248235783Skib	 * refclk, or false.  The returned values represent the clock equation:
5249235783Skib	 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
5250235783Skib	 */
5251235783Skib	limit = intel_limit(crtc, refclk);
5252235783Skib	ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
5253235783Skib			     &clock);
5254235783Skib	if (!ok) {
5255235783Skib		DRM_ERROR("Couldn't find PLL settings for mode!\n");
5256235783Skib		return -EINVAL;
5257235783Skib	}
5258235783Skib
5259235783Skib	/* Ensure that the cursor is valid for the new mode before changing... */
5260235783Skib	intel_crtc_update_cursor(crtc, true);
5261235783Skib
5262235783Skib	if (is_lvds && dev_priv->lvds_downclock_avail) {
5263235783Skib		/*
5264235783Skib		 * Ensure we match the reduced clock's P to the target clock.
5265235783Skib		 * If the clocks don't match, we can't switch the display clock
5266235783Skib		 * by using the FP0/FP1. In such case we will disable the LVDS
5267235783Skib		 * downclock feature.
5268235783Skib		*/
5269235783Skib		has_reduced_clock = limit->find_pll(limit, crtc,
5270235783Skib						    dev_priv->lvds_downclock,
5271235783Skib						    refclk,
5272235783Skib						    &clock,
5273235783Skib						    &reduced_clock);
5274235783Skib	}
5275235783Skib
5276235783Skib	if (is_sdvo && is_tv)
5277235783Skib		i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);
5278235783Skib
5279235783Skib	i9xx_update_pll_dividers(crtc, &clock, has_reduced_clock ?
5280235783Skib				 &reduced_clock : NULL);
5281235783Skib
5282235783Skib	dpll = DPLL_VGA_MODE_DIS;
5283235783Skib
5284235783Skib	if (!IS_GEN2(dev)) {
5285235783Skib		if (is_lvds)
5286235783Skib			dpll |= DPLLB_MODE_LVDS;
5287235783Skib		else
5288235783Skib			dpll |= DPLLB_MODE_DAC_SERIAL;
5289235783Skib		if (is_sdvo) {
5290235783Skib			int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
5291235783Skib			if (pixel_multiplier > 1) {
5292235783Skib				if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
5293235783Skib					dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
5294235783Skib			}
5295235783Skib			dpll |= DPLL_DVO_HIGH_SPEED;
5296235783Skib		}
5297235783Skib		if (is_dp)
5298235783Skib			dpll |= DPLL_DVO_HIGH_SPEED;
5299235783Skib
5300235783Skib		/* compute bitmask from p1 value */
5301235783Skib		if (IS_PINEVIEW(dev))
5302235783Skib			dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
5303235783Skib		else {
5304235783Skib			dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5305235783Skib			if (IS_G4X(dev) && has_reduced_clock)
5306235783Skib				dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
5307235783Skib		}
5308235783Skib		switch (clock.p2) {
5309235783Skib		case 5:
5310235783Skib			dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
5311235783Skib			break;
5312235783Skib		case 7:
5313235783Skib			dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
5314235783Skib			break;
5315235783Skib		case 10:
5316235783Skib			dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
5317235783Skib			break;
5318235783Skib		case 14:
5319235783Skib			dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
5320235783Skib			break;
5321235783Skib		}
5322235783Skib		if (INTEL_INFO(dev)->gen >= 4)
5323235783Skib			dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
5324235783Skib	} else {
5325235783Skib		if (is_lvds) {
5326235783Skib			dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5327235783Skib		} else {
5328235783Skib			if (clock.p1 == 2)
5329235783Skib				dpll |= PLL_P1_DIVIDE_BY_TWO;
5330235783Skib			else
5331235783Skib				dpll |= (clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5332235783Skib			if (clock.p2 == 4)
5333235783Skib				dpll |= PLL_P2_DIVIDE_BY_4;
5334235783Skib		}
5335235783Skib	}
5336235783Skib
5337235783Skib	if (is_sdvo && is_tv)
5338235783Skib		dpll |= PLL_REF_INPUT_TVCLKINBC;
5339235783Skib	else if (is_tv)
5340235783Skib		/* XXX: just matching BIOS for now */
5341235783Skib		/*	dpll |= PLL_REF_INPUT_TVCLKINBC; */
5342235783Skib		dpll |= 3;
5343235783Skib	else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
5344235783Skib		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
5345235783Skib	else
5346235783Skib		dpll |= PLL_REF_INPUT_DREFCLK;
5347235783Skib
5348235783Skib	/* setup pipeconf */
5349235783Skib	pipeconf = I915_READ(PIPECONF(pipe));
5350235783Skib
5351235783Skib	/* Set up the display plane register */
5352235783Skib	dspcntr = DISPPLANE_GAMMA_ENABLE;
5353235783Skib
5354235783Skib	if (pipe == 0)
5355235783Skib		dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
5356235783Skib	else
5357235783Skib		dspcntr |= DISPPLANE_SEL_PIPE_B;
5358235783Skib
5359235783Skib	if (pipe == 0 && INTEL_INFO(dev)->gen < 4) {
5360235783Skib		/* Enable pixel doubling when the dot clock is > 90% of the (display)
5361235783Skib		 * core speed.
5362235783Skib		 *
5363235783Skib		 * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
5364235783Skib		 * pipe == 0 check?
5365235783Skib		 */
5366235783Skib		if (mode->clock >
5367235783Skib		    dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
5368235783Skib			pipeconf |= PIPECONF_DOUBLE_WIDE;
5369235783Skib		else
5370235783Skib			pipeconf &= ~PIPECONF_DOUBLE_WIDE;
5371235783Skib	}
5372235783Skib
5373235783Skib	/* default to 8bpc */
5374235783Skib	pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
5375235783Skib	if (is_dp) {
5376235783Skib		if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
5377235783Skib			pipeconf |= PIPECONF_BPP_6 |
5378235783Skib				    PIPECONF_DITHER_EN |
5379235783Skib				    PIPECONF_DITHER_TYPE_SP;
5380235783Skib		}
5381235783Skib	}
5382235783Skib
5383235783Skib	dpll |= DPLL_VCO_ENABLE;
5384235783Skib
5385235783Skib	DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
5386235783Skib	drm_mode_debug_printmodeline(mode);
5387235783Skib
5388235783Skib	I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
5389235783Skib
5390235783Skib	POSTING_READ(DPLL(pipe));
5391235783Skib	DELAY(150);
5392235783Skib
5393235783Skib	/* The LVDS pin pair needs to be on before the DPLLs are enabled.
5394235783Skib	 * This is an exception to the general rule that mode_set doesn't turn
5395235783Skib	 * things on.
5396235783Skib	 */
5397235783Skib	if (is_lvds) {
5398235783Skib		temp = I915_READ(LVDS);
5399235783Skib		temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
5400235783Skib		if (pipe == 1) {
5401235783Skib			temp |= LVDS_PIPEB_SELECT;
5402235783Skib		} else {
5403235783Skib			temp &= ~LVDS_PIPEB_SELECT;
5404235783Skib		}
5405235783Skib		/* set the corresponsding LVDS_BORDER bit */
5406235783Skib		temp |= dev_priv->lvds_border_bits;
5407235783Skib		/* Set the B0-B3 data pairs corresponding to whether we're going to
5408235783Skib		 * set the DPLLs for dual-channel mode or not.
5409235783Skib		 */
5410235783Skib		if (clock.p2 == 7)
5411235783Skib			temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
5412235783Skib		else
5413235783Skib			temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
5414235783Skib
5415235783Skib		/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
5416235783Skib		 * appropriately here, but we need to look more thoroughly into how
5417235783Skib		 * panels behave in the two modes.
5418235783Skib		 */
5419235783Skib		/* set the dithering flag on LVDS as needed */
5420235783Skib		if (INTEL_INFO(dev)->gen >= 4) {
5421235783Skib			if (dev_priv->lvds_dither)
5422235783Skib				temp |= LVDS_ENABLE_DITHER;
5423235783Skib			else
5424235783Skib				temp &= ~LVDS_ENABLE_DITHER;
5425235783Skib		}
5426235783Skib		if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
5427235783Skib			lvds_sync |= LVDS_HSYNC_POLARITY;
5428235783Skib		if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
5429235783Skib			lvds_sync |= LVDS_VSYNC_POLARITY;
5430235783Skib		if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
5431235783Skib		    != lvds_sync) {
5432235783Skib			char flags[2] = "-+";
5433235783Skib			DRM_INFO("Changing LVDS panel from "
5434235783Skib				 "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
5435235783Skib				 flags[!(temp & LVDS_HSYNC_POLARITY)],
5436235783Skib				 flags[!(temp & LVDS_VSYNC_POLARITY)],
5437235783Skib				 flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
5438235783Skib				 flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
5439235783Skib			temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
5440235783Skib			temp |= lvds_sync;
5441235783Skib		}
5442235783Skib		I915_WRITE(LVDS, temp);
5443235783Skib	}
5444235783Skib
5445235783Skib	if (is_dp) {
5446235783Skib		intel_dp_set_m_n(crtc, mode, adjusted_mode);
5447235783Skib	}
5448235783Skib
5449235783Skib	I915_WRITE(DPLL(pipe), dpll);
5450235783Skib
5451235783Skib	/* Wait for the clocks to stabilize. */
5452235783Skib	POSTING_READ(DPLL(pipe));
5453235783Skib	DELAY(150);
5454235783Skib
5455235783Skib	if (INTEL_INFO(dev)->gen >= 4) {
5456235783Skib		temp = 0;
5457235783Skib		if (is_sdvo) {
5458235783Skib			temp = intel_mode_get_pixel_multiplier(adjusted_mode);
5459235783Skib			if (temp > 1)
5460235783Skib				temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
5461235783Skib			else
5462235783Skib				temp = 0;
5463235783Skib		}
5464235783Skib		I915_WRITE(DPLL_MD(pipe), temp);
5465235783Skib	} else {
5466235783Skib		/* The pixel multiplier can only be updated once the
5467235783Skib		 * DPLL is enabled and the clocks are stable.
5468235783Skib		 *
5469235783Skib		 * So write it again.
5470235783Skib		 */
5471235783Skib		I915_WRITE(DPLL(pipe), dpll);
5472235783Skib	}
5473235783Skib
5474235783Skib	if (HAS_PIPE_CXSR(dev)) {
5475235783Skib		if (intel_crtc->lowfreq_avail) {
5476235783Skib			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
5477235783Skib			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
5478235783Skib		} else {
5479235783Skib			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
5480235783Skib			pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
5481235783Skib		}
5482235783Skib	}
5483235783Skib
5484235783Skib	pipeconf &= ~PIPECONF_INTERLACE_MASK;
5485235783Skib	if (!IS_GEN2(dev) &&
5486235783Skib	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
5487235783Skib		pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
5488235783Skib		/* the chip adds 2 halflines automatically */
5489235783Skib		adjusted_mode->crtc_vtotal -= 1;
5490235783Skib		adjusted_mode->crtc_vblank_end -= 1;
5491235783Skib		vsyncshift = adjusted_mode->crtc_hsync_start
5492235783Skib			     - adjusted_mode->crtc_htotal/2;
5493235783Skib	} else {
5494235783Skib		pipeconf |= PIPECONF_PROGRESSIVE;
5495235783Skib		vsyncshift = 0;
5496235783Skib	}
5497235783Skib
5498235783Skib	if (!IS_GEN3(dev))
5499235783Skib		I915_WRITE(VSYNCSHIFT(pipe), vsyncshift);
5500235783Skib
5501235783Skib	I915_WRITE(HTOTAL(pipe),
5502235783Skib		   (adjusted_mode->crtc_hdisplay - 1) |
5503235783Skib		   ((adjusted_mode->crtc_htotal - 1) << 16));
5504235783Skib	I915_WRITE(HBLANK(pipe),
5505235783Skib		   (adjusted_mode->crtc_hblank_start - 1) |
5506235783Skib		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
5507235783Skib	I915_WRITE(HSYNC(pipe),
5508235783Skib		   (adjusted_mode->crtc_hsync_start - 1) |
5509235783Skib		   ((adjusted_mode->crtc_hsync_end - 1) << 16));
5510235783Skib
5511235783Skib	I915_WRITE(VTOTAL(pipe),
5512235783Skib		   (adjusted_mode->crtc_vdisplay - 1) |
5513235783Skib		   ((adjusted_mode->crtc_vtotal - 1) << 16));
5514235783Skib	I915_WRITE(VBLANK(pipe),
5515235783Skib		   (adjusted_mode->crtc_vblank_start - 1) |
5516235783Skib		   ((adjusted_mode->crtc_vblank_end - 1) << 16));
5517235783Skib	I915_WRITE(VSYNC(pipe),
5518235783Skib		   (adjusted_mode->crtc_vsync_start - 1) |
5519235783Skib		   ((adjusted_mode->crtc_vsync_end - 1) << 16));
5520235783Skib
5521235783Skib	/* pipesrc and dspsize control the size that is scaled from,
5522235783Skib	 * which should always be the user's requested size.
5523235783Skib	 */
5524235783Skib	I915_WRITE(DSPSIZE(plane),
5525235783Skib		   ((mode->vdisplay - 1) << 16) |
5526235783Skib		   (mode->hdisplay - 1));
5527235783Skib	I915_WRITE(DSPPOS(plane), 0);
5528235783Skib	I915_WRITE(PIPESRC(pipe),
5529235783Skib		   ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
5530235783Skib
5531235783Skib	I915_WRITE(PIPECONF(pipe), pipeconf);
5532235783Skib	POSTING_READ(PIPECONF(pipe));
5533235783Skib	intel_enable_pipe(dev_priv, pipe, false);
5534235783Skib
5535235783Skib	intel_wait_for_vblank(dev, pipe);
5536235783Skib
5537235783Skib	I915_WRITE(DSPCNTR(plane), dspcntr);
5538235783Skib	POSTING_READ(DSPCNTR(plane));
5539235783Skib	intel_enable_plane(dev_priv, plane, pipe);
5540235783Skib
5541235783Skib	ret = intel_pipe_set_base(crtc, x, y, old_fb);
5542235783Skib
5543235783Skib	intel_update_watermarks(dev);
5544235783Skib
5545235783Skib	return ret;
5546235783Skib}
5547235783Skib
5548235783Skib/*
5549235783Skib * Initialize reference clocks when the driver loads
5550235783Skib */
5551235783Skibvoid ironlake_init_pch_refclk(struct drm_device *dev)
5552235783Skib{
5553235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
5554235783Skib	struct drm_mode_config *mode_config = &dev->mode_config;
5555235783Skib	struct intel_encoder *encoder;
5556235783Skib	u32 temp;
5557235783Skib	bool has_lvds = false;
5558235783Skib	bool has_cpu_edp = false;
5559235783Skib	bool has_pch_edp = false;
5560235783Skib	bool has_panel = false;
5561235783Skib	bool has_ck505 = false;
5562235783Skib	bool can_ssc = false;
5563235783Skib
5564235783Skib	/* We need to take the global config into account */
5565235783Skib	list_for_each_entry(encoder, &mode_config->encoder_list,
5566235783Skib			    base.head) {
5567235783Skib		switch (encoder->type) {
5568235783Skib		case INTEL_OUTPUT_LVDS:
5569235783Skib			has_panel = true;
5570235783Skib			has_lvds = true;
5571235783Skib			break;
5572235783Skib		case INTEL_OUTPUT_EDP:
5573235783Skib			has_panel = true;
5574235783Skib			if (intel_encoder_is_pch_edp(&encoder->base))
5575235783Skib				has_pch_edp = true;
5576235783Skib			else
5577235783Skib				has_cpu_edp = true;
5578235783Skib			break;
5579235783Skib		}
5580235783Skib	}
5581235783Skib
5582235783Skib	if (HAS_PCH_IBX(dev)) {
5583235783Skib		has_ck505 = dev_priv->display_clock_mode;
5584235783Skib		can_ssc = has_ck505;
5585235783Skib	} else {
5586235783Skib		has_ck505 = false;
5587235783Skib		can_ssc = true;
5588235783Skib	}
5589235783Skib
5590235783Skib	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_pch_edp %d has_cpu_edp %d has_ck505 %d\n",
5591235783Skib		      has_panel, has_lvds, has_pch_edp, has_cpu_edp,
5592235783Skib		      has_ck505);
5593235783Skib
5594235783Skib	/* Ironlake: try to setup display ref clock before DPLL
5595235783Skib	 * enabling. This is only under driver's control after
5596235783Skib	 * PCH B stepping, previous chipset stepping should be
5597235783Skib	 * ignoring this setting.
5598235783Skib	 */
5599235783Skib	temp = I915_READ(PCH_DREF_CONTROL);
5600235783Skib	/* Always enable nonspread source */
5601235783Skib	temp &= ~DREF_NONSPREAD_SOURCE_MASK;
5602235783Skib
5603235783Skib	if (has_ck505)
5604235783Skib		temp |= DREF_NONSPREAD_CK505_ENABLE;
5605235783Skib	else
5606235783Skib		temp |= DREF_NONSPREAD_SOURCE_ENABLE;
5607235783Skib
5608235783Skib	if (has_panel) {
5609235783Skib		temp &= ~DREF_SSC_SOURCE_MASK;
5610235783Skib		temp |= DREF_SSC_SOURCE_ENABLE;
5611235783Skib
5612235783Skib		/* SSC must be turned on before enabling the CPU output  */
5613235783Skib		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
5614235783Skib			DRM_DEBUG_KMS("Using SSC on panel\n");
5615235783Skib			temp |= DREF_SSC1_ENABLE;
5616235783Skib		} else
5617235783Skib			temp &= ~DREF_SSC1_ENABLE;
5618235783Skib
5619235783Skib		/* Get SSC going before enabling the outputs */
5620235783Skib		I915_WRITE(PCH_DREF_CONTROL, temp);
5621235783Skib		POSTING_READ(PCH_DREF_CONTROL);
5622235783Skib		DELAY(200);
5623235783Skib
5624235783Skib		temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
5625235783Skib
5626235783Skib		/* Enable CPU source on CPU attached eDP */
5627235783Skib		if (has_cpu_edp) {
5628235783Skib			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
5629235783Skib				DRM_DEBUG_KMS("Using SSC on eDP\n");
5630235783Skib				temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
5631235783Skib			}
5632235783Skib			else
5633235783Skib				temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
5634235783Skib		} else
5635235783Skib			temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
5636235783Skib
5637235783Skib		I915_WRITE(PCH_DREF_CONTROL, temp);
5638235783Skib		POSTING_READ(PCH_DREF_CONTROL);
5639235783Skib		DELAY(200);
5640235783Skib	} else {
5641235783Skib		DRM_DEBUG_KMS("Disabling SSC entirely\n");
5642235783Skib
5643235783Skib		temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
5644235783Skib
5645235783Skib		/* Turn off CPU output */
5646235783Skib		temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
5647235783Skib
5648235783Skib		I915_WRITE(PCH_DREF_CONTROL, temp);
5649235783Skib		POSTING_READ(PCH_DREF_CONTROL);
5650235783Skib		DELAY(200);
5651235783Skib
5652235783Skib		/* Turn off the SSC source */
5653235783Skib		temp &= ~DREF_SSC_SOURCE_MASK;
5654235783Skib		temp |= DREF_SSC_SOURCE_DISABLE;
5655235783Skib
5656235783Skib		/* Turn off SSC1 */
5657235783Skib		temp &= ~ DREF_SSC1_ENABLE;
5658235783Skib
5659235783Skib		I915_WRITE(PCH_DREF_CONTROL, temp);
5660235783Skib		POSTING_READ(PCH_DREF_CONTROL);
5661235783Skib		DELAY(200);
5662235783Skib	}
5663235783Skib}
5664235783Skib
5665235783Skibstatic int ironlake_get_refclk(struct drm_crtc *crtc)
5666235783Skib{
5667235783Skib	struct drm_device *dev = crtc->dev;
5668235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
5669235783Skib	struct intel_encoder *encoder;
5670235783Skib	struct drm_mode_config *mode_config = &dev->mode_config;
5671235783Skib	struct intel_encoder *edp_encoder = NULL;
5672235783Skib	int num_connectors = 0;
5673235783Skib	bool is_lvds = false;
5674235783Skib
5675235783Skib	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
5676235783Skib		if (encoder->base.crtc != crtc)
5677235783Skib			continue;
5678235783Skib
5679235783Skib		switch (encoder->type) {
5680235783Skib		case INTEL_OUTPUT_LVDS:
5681235783Skib			is_lvds = true;
5682235783Skib			break;
5683235783Skib		case INTEL_OUTPUT_EDP:
5684235783Skib			edp_encoder = encoder;
5685235783Skib			break;
5686235783Skib		}
5687235783Skib		num_connectors++;
5688235783Skib	}
5689235783Skib
5690235783Skib	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
5691235783Skib		DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
5692235783Skib			      dev_priv->lvds_ssc_freq);
5693235783Skib		return dev_priv->lvds_ssc_freq * 1000;
5694235783Skib	}
5695235783Skib
5696235783Skib	return 120000;
5697235783Skib}
5698235783Skib
5699235783Skibstatic int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5700235783Skib				  struct drm_display_mode *mode,
5701235783Skib				  struct drm_display_mode *adjusted_mode,
5702235783Skib				  int x, int y,
5703235783Skib				  struct drm_framebuffer *old_fb)
5704235783Skib{
5705235783Skib	struct drm_device *dev = crtc->dev;
5706235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
5707235783Skib	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5708235783Skib	int pipe = intel_crtc->pipe;
5709235783Skib	int plane = intel_crtc->plane;
5710235783Skib	int refclk, num_connectors = 0;
5711235783Skib	intel_clock_t clock, reduced_clock;
5712235783Skib	u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
5713235783Skib	bool ok, has_reduced_clock = false, is_sdvo = false;
5714235783Skib	bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
5715235783Skib	struct intel_encoder *has_edp_encoder = NULL;
5716235783Skib	struct drm_mode_config *mode_config = &dev->mode_config;
5717235783Skib	struct intel_encoder *encoder;
5718235783Skib	const intel_limit_t *limit;
5719235783Skib	int ret;
5720235783Skib	struct fdi_m_n m_n = {0};
5721235783Skib	u32 temp;
5722235783Skib	u32 lvds_sync = 0;
5723235783Skib	int target_clock, pixel_multiplier, lane, link_bw, factor;
5724235783Skib	unsigned int pipe_bpp;
5725235783Skib	bool dither;
5726235783Skib
5727235783Skib	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
5728235783Skib		if (encoder->base.crtc != crtc)
5729235783Skib			continue;
5730235783Skib
5731235783Skib		switch (encoder->type) {
5732235783Skib		case INTEL_OUTPUT_LVDS:
5733235783Skib			is_lvds = true;
5734235783Skib			break;
5735235783Skib		case INTEL_OUTPUT_SDVO:
5736235783Skib		case INTEL_OUTPUT_HDMI:
5737235783Skib			is_sdvo = true;
5738235783Skib			if (encoder->needs_tv_clock)
5739235783Skib				is_tv = true;
5740235783Skib			break;
5741235783Skib		case INTEL_OUTPUT_TVOUT:
5742235783Skib			is_tv = true;
5743235783Skib			break;
5744235783Skib		case INTEL_OUTPUT_ANALOG:
5745235783Skib			is_crt = true;
5746235783Skib			break;
5747235783Skib		case INTEL_OUTPUT_DISPLAYPORT:
5748235783Skib			is_dp = true;
5749235783Skib			break;
5750235783Skib		case INTEL_OUTPUT_EDP:
5751235783Skib			has_edp_encoder = encoder;
5752235783Skib			break;
5753235783Skib		}
5754235783Skib
5755235783Skib		num_connectors++;
5756235783Skib	}
5757235783Skib
5758235783Skib	refclk = ironlake_get_refclk(crtc);
5759235783Skib
5760235783Skib	/*
5761235783Skib	 * Returns a set of divisors for the desired target clock with the given
5762235783Skib	 * refclk, or false.  The returned values represent the clock equation:
5763235783Skib	 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
5764235783Skib	 */
5765235783Skib	limit = intel_limit(crtc, refclk);
5766235783Skib	ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
5767235783Skib			     &clock);
5768235783Skib	if (!ok) {
5769235783Skib		DRM_ERROR("Couldn't find PLL settings for mode!\n");
5770235783Skib		return -EINVAL;
5771235783Skib	}
5772235783Skib
5773235783Skib	/* Ensure that the cursor is valid for the new mode before changing... */
5774235783Skib	intel_crtc_update_cursor(crtc, true);
5775235783Skib
5776235783Skib	if (is_lvds && dev_priv->lvds_downclock_avail) {
5777235783Skib		/*
5778235783Skib		 * Ensure we match the reduced clock's P to the target clock.
5779235783Skib		 * If the clocks don't match, we can't switch the display clock
5780235783Skib		 * by using the FP0/FP1. In such case we will disable the LVDS
5781235783Skib		 * downclock feature.
5782235783Skib		*/
5783235783Skib		has_reduced_clock = limit->find_pll(limit, crtc,
5784235783Skib						    dev_priv->lvds_downclock,
5785235783Skib						    refclk,
5786235783Skib						    &clock,
5787235783Skib						    &reduced_clock);
5788235783Skib	}
5789235783Skib	/* SDVO TV has fixed PLL values depend on its clock range,
5790235783Skib	   this mirrors vbios setting. */
5791235783Skib	if (is_sdvo && is_tv) {
5792235783Skib		if (adjusted_mode->clock >= 100000
5793235783Skib		    && adjusted_mode->clock < 140500) {
5794235783Skib			clock.p1 = 2;
5795235783Skib			clock.p2 = 10;
5796235783Skib			clock.n = 3;
5797235783Skib			clock.m1 = 16;
5798235783Skib			clock.m2 = 8;
5799235783Skib		} else if (adjusted_mode->clock >= 140500
5800235783Skib			   && adjusted_mode->clock <= 200000) {
5801235783Skib			clock.p1 = 1;
5802235783Skib			clock.p2 = 10;
5803235783Skib			clock.n = 6;
5804235783Skib			clock.m1 = 12;
5805235783Skib			clock.m2 = 8;
5806235783Skib		}
5807235783Skib	}
5808235783Skib
5809235783Skib	/* FDI link */
5810235783Skib	pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
5811235783Skib	lane = 0;
5812235783Skib	/* CPU eDP doesn't require FDI link, so just set DP M/N
5813235783Skib	   according to current link config */
5814235783Skib	if (has_edp_encoder &&
5815235783Skib	    !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
5816235783Skib		target_clock = mode->clock;
5817235783Skib		intel_edp_link_config(has_edp_encoder,
5818235783Skib				      &lane, &link_bw);
5819235783Skib	} else {
5820235783Skib		/* [e]DP over FDI requires target mode clock
5821235783Skib		   instead of link clock */
5822235783Skib		if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
5823235783Skib			target_clock = mode->clock;
5824235783Skib		else
5825235783Skib			target_clock = adjusted_mode->clock;
5826235783Skib
5827235783Skib		/* FDI is a binary signal running at ~2.7GHz, encoding
5828235783Skib		 * each output octet as 10 bits. The actual frequency
5829235783Skib		 * is stored as a divider into a 100MHz clock, and the
5830235783Skib		 * mode pixel clock is stored in units of 1KHz.
5831235783Skib		 * Hence the bw of each lane in terms of the mode signal
5832235783Skib		 * is:
5833235783Skib		 */
5834235783Skib		link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
5835235783Skib	}
5836235783Skib
5837235783Skib	/* determine panel color depth */
5838235783Skib	temp = I915_READ(PIPECONF(pipe));
5839235783Skib	temp &= ~PIPE_BPC_MASK;
5840235783Skib	dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode);
5841235783Skib	switch (pipe_bpp) {
5842235783Skib	case 18:
5843235783Skib		temp |= PIPE_6BPC;
5844235783Skib		break;
5845235783Skib	case 24:
5846235783Skib		temp |= PIPE_8BPC;
5847235783Skib		break;
5848235783Skib	case 30:
5849235783Skib		temp |= PIPE_10BPC;
5850235783Skib		break;
5851235783Skib	case 36:
5852235783Skib		temp |= PIPE_12BPC;
5853235783Skib		break;
5854235783Skib	default:
5855235783Skib		printf("intel_choose_pipe_bpp returned invalid value %d\n",
5856235783Skib			pipe_bpp);
5857235783Skib		temp |= PIPE_8BPC;
5858235783Skib		pipe_bpp = 24;
5859235783Skib		break;
5860235783Skib	}
5861235783Skib
5862235783Skib	intel_crtc->bpp = pipe_bpp;
5863235783Skib	I915_WRITE(PIPECONF(pipe), temp);
5864235783Skib
5865235783Skib	if (!lane) {
5866235783Skib		/*
5867235783Skib		 * Account for spread spectrum to avoid
5868235783Skib		 * oversubscribing the link. Max center spread
5869235783Skib		 * is 2.5%; use 5% for safety's sake.
5870235783Skib		 */
5871235783Skib		u32 bps = target_clock * intel_crtc->bpp * 21 / 20;
5872235783Skib		lane = bps / (link_bw * 8) + 1;
5873235783Skib	}
5874235783Skib
5875235783Skib	intel_crtc->fdi_lanes = lane;
5876235783Skib
5877235783Skib	if (pixel_multiplier > 1)
5878235783Skib		link_bw *= pixel_multiplier;
5879235783Skib	ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
5880235783Skib			     &m_n);
5881235783Skib
5882235783Skib	fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
5883235783Skib	if (has_reduced_clock)
5884235783Skib		fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
5885235783Skib			reduced_clock.m2;
5886235783Skib
5887235783Skib	/* Enable autotuning of the PLL clock (if permissible) */
5888235783Skib	factor = 21;
5889235783Skib	if (is_lvds) {
5890235783Skib		if ((intel_panel_use_ssc(dev_priv) &&
5891235783Skib		     dev_priv->lvds_ssc_freq == 100) ||
5892235783Skib		    (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP)
5893235783Skib			factor = 25;
5894235783Skib	} else if (is_sdvo && is_tv)
5895235783Skib		factor = 20;
5896235783Skib
5897235783Skib	if (clock.m < factor * clock.n)
5898235783Skib		fp |= FP_CB_TUNE;
5899235783Skib
5900235783Skib	dpll = 0;
5901235783Skib
5902235783Skib	if (is_lvds)
5903235783Skib		dpll |= DPLLB_MODE_LVDS;
5904235783Skib	else
5905235783Skib		dpll |= DPLLB_MODE_DAC_SERIAL;
5906235783Skib	if (is_sdvo) {
5907235783Skib		int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
5908235783Skib		if (pixel_multiplier > 1) {
5909235783Skib			dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
5910235783Skib		}
5911235783Skib		dpll |= DPLL_DVO_HIGH_SPEED;
5912235783Skib	}
5913235783Skib	if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
5914235783Skib		dpll |= DPLL_DVO_HIGH_SPEED;
5915235783Skib
5916235783Skib	/* compute bitmask from p1 value */
5917235783Skib	dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5918235783Skib	/* also FPA1 */
5919235783Skib	dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
5920235783Skib
5921235783Skib	switch (clock.p2) {
5922235783Skib	case 5:
5923235783Skib		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
5924235783Skib		break;
5925235783Skib	case 7:
5926235783Skib		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
5927235783Skib		break;
5928235783Skib	case 10:
5929235783Skib		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
5930235783Skib		break;
5931235783Skib	case 14:
5932235783Skib		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
5933235783Skib		break;
5934235783Skib	}
5935235783Skib
5936235783Skib	if (is_sdvo && is_tv)
5937235783Skib		dpll |= PLL_REF_INPUT_TVCLKINBC;
5938235783Skib	else if (is_tv)
5939235783Skib		/* XXX: just matching BIOS for now */
5940235783Skib		/*	dpll |= PLL_REF_INPUT_TVCLKINBC; */
5941235783Skib		dpll |= 3;
5942235783Skib	else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
5943235783Skib		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
5944235783Skib	else
5945235783Skib		dpll |= PLL_REF_INPUT_DREFCLK;
5946235783Skib
5947235783Skib	/* setup pipeconf */
5948235783Skib	pipeconf = I915_READ(PIPECONF(pipe));
5949235783Skib
5950235783Skib	/* Set up the display plane register */
5951235783Skib	dspcntr = DISPPLANE_GAMMA_ENABLE;
5952235783Skib
5953235783Skib	DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
5954235783Skib	drm_mode_debug_printmodeline(mode);
5955235783Skib
5956235783Skib	/* PCH eDP needs FDI, but CPU eDP does not */
5957235783Skib	if (!intel_crtc->no_pll) {
5958235783Skib		if (!has_edp_encoder ||
5959235783Skib		    intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
5960235783Skib			I915_WRITE(PCH_FP0(pipe), fp);
5961235783Skib			I915_WRITE(PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
5962235783Skib
5963235783Skib			POSTING_READ(PCH_DPLL(pipe));
5964235783Skib			DELAY(150);
5965235783Skib		}
5966235783Skib	} else {
5967235783Skib		if (dpll == (I915_READ(PCH_DPLL(0)) & 0x7fffffff) &&
5968235783Skib		    fp == I915_READ(PCH_FP0(0))) {
5969235783Skib			intel_crtc->use_pll_a = true;
5970235783Skib			DRM_DEBUG_KMS("using pipe a dpll\n");
5971235783Skib		} else if (dpll == (I915_READ(PCH_DPLL(1)) & 0x7fffffff) &&
5972235783Skib			   fp == I915_READ(PCH_FP0(1))) {
5973235783Skib			intel_crtc->use_pll_a = false;
5974235783Skib			DRM_DEBUG_KMS("using pipe b dpll\n");
5975235783Skib		} else {
5976235783Skib			DRM_DEBUG_KMS("no matching PLL configuration for pipe 2\n");
5977235783Skib			return -EINVAL;
5978235783Skib		}
5979235783Skib	}
5980235783Skib
5981235783Skib	/* The LVDS pin pair needs to be on before the DPLLs are enabled.
5982235783Skib	 * This is an exception to the general rule that mode_set doesn't turn
5983235783Skib	 * things on.
5984235783Skib	 */
5985235783Skib	if (is_lvds) {
5986235783Skib		temp = I915_READ(PCH_LVDS);
5987235783Skib		temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
5988235783Skib		if (HAS_PCH_CPT(dev)) {
5989235783Skib			temp &= ~PORT_TRANS_SEL_MASK;
5990235783Skib			temp |= PORT_TRANS_SEL_CPT(pipe);
5991235783Skib		} else {
5992235783Skib			if (pipe == 1)
5993235783Skib				temp |= LVDS_PIPEB_SELECT;
5994235783Skib			else
5995235783Skib				temp &= ~LVDS_PIPEB_SELECT;
5996235783Skib		}
5997235783Skib
5998235783Skib		/* set the corresponsding LVDS_BORDER bit */
5999235783Skib		temp |= dev_priv->lvds_border_bits;
6000235783Skib		/* Set the B0-B3 data pairs corresponding to whether we're going to
6001235783Skib		 * set the DPLLs for dual-channel mode or not.
6002235783Skib		 */
6003235783Skib		if (clock.p2 == 7)
6004235783Skib			temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
6005235783Skib		else
6006235783Skib			temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
6007235783Skib
6008235783Skib		/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
6009235783Skib		 * appropriately here, but we need to look more thoroughly into how
6010235783Skib		 * panels behave in the two modes.
6011235783Skib		 */
6012235783Skib		if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
6013235783Skib			lvds_sync |= LVDS_HSYNC_POLARITY;
6014235783Skib		if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
6015235783Skib			lvds_sync |= LVDS_VSYNC_POLARITY;
6016235783Skib		if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
6017235783Skib		    != lvds_sync) {
6018235783Skib			char flags[2] = "-+";
6019235783Skib			DRM_INFO("Changing LVDS panel from "
6020235783Skib				 "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
6021235783Skib				 flags[!(temp & LVDS_HSYNC_POLARITY)],
6022235783Skib				 flags[!(temp & LVDS_VSYNC_POLARITY)],
6023235783Skib				 flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
6024235783Skib				 flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
6025235783Skib			temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
6026235783Skib			temp |= lvds_sync;
6027235783Skib		}
6028235783Skib		I915_WRITE(PCH_LVDS, temp);
6029235783Skib	}
6030235783Skib
6031235783Skib	pipeconf &= ~PIPECONF_DITHER_EN;
6032235783Skib	pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
6033235783Skib	if ((is_lvds && dev_priv->lvds_dither) || dither) {
6034235783Skib		pipeconf |= PIPECONF_DITHER_EN;
6035235783Skib		pipeconf |= PIPECONF_DITHER_TYPE_SP;
6036235783Skib	}
6037235783Skib	if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
6038235783Skib		intel_dp_set_m_n(crtc, mode, adjusted_mode);
6039235783Skib	} else {
6040235783Skib		/* For non-DP output, clear any trans DP clock recovery setting.*/
6041235783Skib		I915_WRITE(TRANSDATA_M1(pipe), 0);
6042235783Skib		I915_WRITE(TRANSDATA_N1(pipe), 0);
6043235783Skib		I915_WRITE(TRANSDPLINK_M1(pipe), 0);
6044235783Skib		I915_WRITE(TRANSDPLINK_N1(pipe), 0);
6045235783Skib	}
6046235783Skib
6047235783Skib	if (!intel_crtc->no_pll &&
6048235783Skib	    (!has_edp_encoder ||
6049235783Skib	     intel_encoder_is_pch_edp(&has_edp_encoder->base))) {
6050235783Skib		I915_WRITE(PCH_DPLL(pipe), dpll);
6051235783Skib
6052235783Skib		/* Wait for the clocks to stabilize. */
6053235783Skib		POSTING_READ(PCH_DPLL(pipe));
6054235783Skib		DELAY(150);
6055235783Skib
6056235783Skib		/* The pixel multiplier can only be updated once the
6057235783Skib		 * DPLL is enabled and the clocks are stable.
6058235783Skib		 *
6059235783Skib		 * So write it again.
6060235783Skib		 */
6061235783Skib		I915_WRITE(PCH_DPLL(pipe), dpll);
6062235783Skib	}
6063235783Skib
6064235783Skib	intel_crtc->lowfreq_avail = false;
6065235783Skib	if (!intel_crtc->no_pll) {
6066235783Skib		if (is_lvds && has_reduced_clock && i915_powersave) {
6067235783Skib			I915_WRITE(PCH_FP1(pipe), fp2);
6068235783Skib			intel_crtc->lowfreq_avail = true;
6069235783Skib			if (HAS_PIPE_CXSR(dev)) {
6070235783Skib				DRM_DEBUG_KMS("enabling CxSR downclocking\n");
6071235783Skib				pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
6072235783Skib			}
6073235783Skib		} else {
6074235783Skib			I915_WRITE(PCH_FP1(pipe), fp);
6075235783Skib			if (HAS_PIPE_CXSR(dev)) {
6076235783Skib				DRM_DEBUG_KMS("disabling CxSR downclocking\n");
6077235783Skib				pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
6078235783Skib			}
6079235783Skib		}
6080235783Skib	}
6081235783Skib
6082235783Skib	pipeconf &= ~PIPECONF_INTERLACE_MASK;
6083235783Skib	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
6084235783Skib		pipeconf |= PIPECONF_INTERLACED_ILK;
6085235783Skib		/* the chip adds 2 halflines automatically */
6086235783Skib		adjusted_mode->crtc_vtotal -= 1;
6087235783Skib		adjusted_mode->crtc_vblank_end -= 1;
6088235783Skib		I915_WRITE(VSYNCSHIFT(pipe),
6089235783Skib			   adjusted_mode->crtc_hsync_start
6090235783Skib			   - adjusted_mode->crtc_htotal/2);
6091235783Skib	} else {
6092235783Skib		pipeconf |= PIPECONF_PROGRESSIVE;
6093235783Skib		I915_WRITE(VSYNCSHIFT(pipe), 0);
6094235783Skib	}
6095235783Skib
6096235783Skib	I915_WRITE(HTOTAL(pipe),
6097235783Skib		   (adjusted_mode->crtc_hdisplay - 1) |
6098235783Skib		   ((adjusted_mode->crtc_htotal - 1) << 16));
6099235783Skib	I915_WRITE(HBLANK(pipe),
6100235783Skib		   (adjusted_mode->crtc_hblank_start - 1) |
6101235783Skib		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
6102235783Skib	I915_WRITE(HSYNC(pipe),
6103235783Skib		   (adjusted_mode->crtc_hsync_start - 1) |
6104235783Skib		   ((adjusted_mode->crtc_hsync_end - 1) << 16));
6105235783Skib
6106235783Skib	I915_WRITE(VTOTAL(pipe),
6107235783Skib		   (adjusted_mode->crtc_vdisplay - 1) |
6108235783Skib		   ((adjusted_mode->crtc_vtotal - 1) << 16));
6109235783Skib	I915_WRITE(VBLANK(pipe),
6110235783Skib		   (adjusted_mode->crtc_vblank_start - 1) |
6111235783Skib		   ((adjusted_mode->crtc_vblank_end - 1) << 16));
6112235783Skib	I915_WRITE(VSYNC(pipe),
6113235783Skib		   (adjusted_mode->crtc_vsync_start - 1) |
6114235783Skib		   ((adjusted_mode->crtc_vsync_end - 1) << 16));
6115235783Skib
6116235783Skib	/* pipesrc controls the size that is scaled from, which should
6117235783Skib	 * always be the user's requested size.
6118235783Skib	 */
6119235783Skib	I915_WRITE(PIPESRC(pipe),
6120235783Skib		   ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
6121235783Skib
6122235783Skib	I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
6123235783Skib	I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
6124235783Skib	I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
6125235783Skib	I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
6126235783Skib
6127235783Skib	if (has_edp_encoder &&
6128235783Skib	    !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
6129235783Skib		ironlake_set_pll_edp(crtc, adjusted_mode->clock);
6130235783Skib	}
6131235783Skib
6132235783Skib	I915_WRITE(PIPECONF(pipe), pipeconf);
6133235783Skib	POSTING_READ(PIPECONF(pipe));
6134235783Skib
6135235783Skib	intel_wait_for_vblank(dev, pipe);
6136235783Skib
6137235783Skib	I915_WRITE(DSPCNTR(plane), dspcntr);
6138235783Skib	POSTING_READ(DSPCNTR(plane));
6139235783Skib
6140235783Skib	ret = intel_pipe_set_base(crtc, x, y, old_fb);
6141235783Skib
6142235783Skib	intel_update_watermarks(dev);
6143235783Skib
6144235783Skib	return ret;
6145235783Skib}
6146235783Skib
6147235783Skibstatic int intel_crtc_mode_set(struct drm_crtc *crtc,
6148235783Skib			       struct drm_display_mode *mode,
6149235783Skib			       struct drm_display_mode *adjusted_mode,
6150235783Skib			       int x, int y,
6151235783Skib			       struct drm_framebuffer *old_fb)
6152235783Skib{
6153235783Skib	struct drm_device *dev = crtc->dev;
6154235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
6155235783Skib	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6156235783Skib	int pipe = intel_crtc->pipe;
6157235783Skib	int ret;
6158235783Skib
6159235783Skib	drm_vblank_pre_modeset(dev, pipe);
6160235783Skib
6161235783Skib	ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
6162235783Skib					      x, y, old_fb);
6163235783Skib	drm_vblank_post_modeset(dev, pipe);
6164235783Skib
6165235783Skib	if (ret)
6166235783Skib		intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
6167235783Skib	else
6168235783Skib		intel_crtc->dpms_mode = DRM_MODE_DPMS_ON;
6169235783Skib
6170235783Skib	return ret;
6171235783Skib}
6172235783Skib
/*
 * Checks whether the ELD (EDID-Like Data) currently programmed into the
 * audio hardware matches connector->eld, so a rewrite can be skipped.
 *
 * reg_eldv/bits_eldv: register and mask holding the ELD-valid flag.
 * reg_elda/bits_elda: register and mask of the ELD access address,
 *                     cleared here to rewind the hardware read pointer.
 * reg_edid:           data register read repeatedly to stream out the
 *                     buffered ELD dwords.
 *
 * Returns true when the hardware state is already up to date.
 */
static bool intel_eld_uptodate(struct drm_connector *connector,
			       int reg_eldv, uint32_t bits_eldv,
			       int reg_elda, uint32_t bits_elda,
			       int reg_edid)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	uint32_t i;

	i = I915_READ(reg_eldv);
	i &= bits_eldv;

	/* No ELD to program: up to date iff the valid bit is clear. */
	if (!eld[0])
		return !i;

	/* Have an ELD but hardware says none is valid: stale. */
	if (!i)
		return false;

	/* Rewind the ELD access address so reads start from dword 0. */
	i = I915_READ(reg_elda);
	i &= ~bits_elda;
	I915_WRITE(reg_elda, i);

	/* eld[2] is the ELD payload length in dwords; compare each one. */
	for (i = 0; i < eld[2]; i++)
		if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
			return false;

	return true;
}
6201235783Skib
/*
 * Programs the connector's ELD into the G4x audio hardware so the audio
 * driver can discover the monitor's audio capabilities.  An empty ELD
 * (eld[0] == 0) just invalidates whatever the hardware currently holds.
 */
static void g4x_write_eld(struct drm_connector *connector,
			  struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	uint32_t eldv;
	uint32_t len;
	uint32_t i;

	i = I915_READ(G4X_AUD_VID_DID);

	/* The ELD-valid bit to use depends on the audio device ID. */
	if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL)
		eldv = G4X_ELDV_DEVCL_DEVBLC;
	else
		eldv = G4X_ELDV_DEVCTG;

	/* Nothing to do when the hardware already holds this ELD. */
	if (intel_eld_uptodate(connector,
			       G4X_AUD_CNTL_ST, eldv,
			       G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
			       G4X_HDMIW_HDMIEDID))
		return;

	/* Clear the valid bit and rewind the ELD write address. */
	i = I915_READ(G4X_AUD_CNTL_ST);
	i &= ~(eldv | G4X_ELD_ADDR);
	len = (i >> 9) & 0x1f;		/* ELD buffer size */
	I915_WRITE(G4X_AUD_CNTL_ST, i);

	/* Empty ELD: leave the hardware copy invalidated. */
	if (!eld[0])
		return;

	/* Clamp to the hardware buffer size; both counts are in dwords. */
	if (eld[2] < (uint8_t)len)
		len = eld[2];
	DRM_DEBUG_KMS("ELD size %d\n", len);
	for (i = 0; i < len; i++)
		I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));

	/* Flag the freshly written ELD as valid. */
	i = I915_READ(G4X_AUD_CNTL_ST);
	i |= eldv;
	I915_WRITE(G4X_AUD_CNTL_ST, i);
}
6242235783Skib
/*
 * Programs the connector's ELD on PCH-based platforms.  The register
 * block differs between the IBX and CPT PCH generations, and the
 * per-pipe registers are spaced 0x100 apart.  An empty ELD just
 * invalidates the hardware copy.
 */
static void ironlake_write_eld(struct drm_connector *connector,
				     struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	uint32_t eldv;
	uint32_t i;
	int len;
	int hdmiw_hdmiedid;
	int aud_config;
	int aud_cntl_st;
	int aud_cntrl_st2;

	/* Select the register block for the PCH generation in use. */
	if (HAS_PCH_IBX(connector->dev)) {
		hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID_A;
		aud_config = IBX_AUD_CONFIG_A;
		aud_cntl_st = IBX_AUD_CNTL_ST_A;
		aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
	} else {
		hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID_A;
		aud_config = CPT_AUD_CONFIG_A;
		aud_cntl_st = CPT_AUD_CNTL_ST_A;
		aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
	}

	/* Offset to this CRTC's pipe; registers are 0x100 apart. */
	i = to_intel_crtc(crtc)->pipe;
	hdmiw_hdmiedid += i * 0x100;
	aud_cntl_st += i * 0x100;
	aud_config += i * 0x100;

	DRM_DEBUG_KMS("ELD on pipe %c\n", pipe_name(i));

	i = I915_READ(aud_cntl_st);
	i = (i >> 29) & 0x3;		/* DIP_Port_Select, 0x1 = PortB */
	if (!i) {
		DRM_DEBUG_KMS("Audio directed to unknown port\n");
		/* operate blindly on all ports */
		eldv = IBX_ELD_VALIDB;
		eldv |= IBX_ELD_VALIDB << 4;
		eldv |= IBX_ELD_VALIDB << 8;
	} else {
		DRM_DEBUG_KMS("ELD on port %c\n", 'A' + i);
		eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
	}

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
		DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
		eld[5] |= (1 << 2);	/* Conn_Type, 0x1 = DisplayPort */
		I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
	} else
		I915_WRITE(aud_config, 0);

	/* Skip the rewrite when the hardware already holds this ELD. */
	if (intel_eld_uptodate(connector,
			       aud_cntrl_st2, eldv,
			       aud_cntl_st, IBX_ELD_ADDRESS,
			       hdmiw_hdmiedid))
		return;

	/* Invalidate the current ELD before touching the buffer. */
	i = I915_READ(aud_cntrl_st2);
	i &= ~eldv;
	I915_WRITE(aud_cntrl_st2, i);

	if (!eld[0])
		return;

	/* Rewind the ELD write address to the start of the buffer. */
	i = I915_READ(aud_cntl_st);
	i &= ~IBX_ELD_ADDRESS;
	I915_WRITE(aud_cntl_st, i);

	/* 84 bytes of hw ELD buffer */
	len = 21;
	if (eld[2] < (uint8_t)len)
		len = eld[2];
	DRM_DEBUG_KMS("ELD size %d\n", len);
	for (i = 0; i < len; i++)
		I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));

	/* Flag the new ELD as valid so the codec picks it up. */
	i = I915_READ(aud_cntrl_st2);
	i |= eldv;
	I915_WRITE(aud_cntrl_st2, i);
}
6324235783Skib
/*
 * Entry point for ELD programming on a mode set: selects the
 * audio-capable connector for this encoder/mode, stamps the AV sync
 * delay into the ELD, and hands it to the platform-specific write_eld
 * hook (if any).
 */
void intel_write_eld(struct drm_encoder *encoder,
		     struct drm_display_mode *mode)
{
	struct drm_crtc *crtc = encoder->crtc;
	struct drm_connector *connector;
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* No audio-capable connector for this encoder: nothing to do. */
	connector = drm_select_eld(encoder, mode);
	if (!connector)
		return;

	DRM_DEBUG_KMS("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
			 connector->base.id,
			 drm_get_connector_name(connector),
			 connector->encoder->base.id,
			 drm_get_encoder_name(connector->encoder));

	/* ELD byte 6 carries the AV sync delay in 2 ms units. */
	connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;

	if (dev_priv->display.write_eld)
		dev_priv->display.write_eld(connector, crtc);
}
6348235783Skib
6349235783Skib/** Loads the palette/gamma unit for the CRTC with the prepared values */
6350235783Skibvoid intel_crtc_load_lut(struct drm_crtc *crtc)
6351235783Skib{
6352235783Skib	struct drm_device *dev = crtc->dev;
6353235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
6354235783Skib	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6355235783Skib	int palreg = PALETTE(intel_crtc->pipe);
6356235783Skib	int i;
6357235783Skib
6358235783Skib	/* The clocks have to be on to load the palette. */
6359235783Skib	if (!crtc->enabled || !intel_crtc->active)
6360235783Skib		return;
6361235783Skib
6362235783Skib	/* use legacy palette for Ironlake */
6363235783Skib	if (HAS_PCH_SPLIT(dev))
6364235783Skib		palreg = LGC_PALETTE(intel_crtc->pipe);
6365235783Skib
6366235783Skib	for (i = 0; i < 256; i++) {
6367235783Skib		I915_WRITE(palreg + 4 * i,
6368235783Skib			   (intel_crtc->lut_r[i] << 16) |
6369235783Skib			   (intel_crtc->lut_g[i] << 8) |
6370235783Skib			   intel_crtc->lut_b[i]);
6371235783Skib	}
6372235783Skib}
6373235783Skib
/*
 * Updates cursor visibility on 845G/865G-class hardware; base == 0
 * means "hide the cursor".  These parts can only change the cursor
 * base address while the cursor is disabled, hence the ordering below.
 */
static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	bool visible = base != 0;
	u32 cntl;

	/* Visibility unchanged: nothing to reprogram. */
	if (intel_crtc->cursor_visible == visible)
		return;

	cntl = I915_READ(_CURACNTR);
	if (visible) {
		/* On these chipsets we can only modify the base whilst
		 * the cursor is disabled.
		 */
		I915_WRITE(_CURABASE, base);

		cntl &= ~(CURSOR_FORMAT_MASK);
		/* XXX width must be 64, stride 256 => 0x00 << 28 */
		cntl |= CURSOR_ENABLE |
			CURSOR_GAMMA_ENABLE |
			CURSOR_FORMAT_ARGB;
	} else
		cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
	I915_WRITE(_CURACNTR, cntl);

	intel_crtc->cursor_visible = visible;
}
6403235783Skib
/*
 * Updates cursor visibility for chipsets other than 845G/865G and
 * Ivybridge (see the dispatch in intel_crtc_update_cursor()).  Unlike
 * i845, the base register may be rewritten at any time; the write
 * latches at the next vblank.
 */
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	bool visible = base != 0;

	/* Only touch the control register on a visibility change. */
	if (intel_crtc->cursor_visible != visible) {
		uint32_t cntl = I915_READ(CURCNTR(pipe));
		if (base) {
			cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
			cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
			cntl |= pipe << 28; /* Connect to correct pipe */
		} else {
			cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
			cntl |= CURSOR_MODE_DISABLE;
		}
		I915_WRITE(CURCNTR(pipe), cntl);

		intel_crtc->cursor_visible = visible;
	}
	/* and commit changes on next vblank */
	I915_WRITE(CURBASE(pipe), base);
}
6429235783Skib
6430235783Skibstatic void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
6431235783Skib{
6432235783Skib	struct drm_device *dev = crtc->dev;
6433235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
6434235783Skib	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6435235783Skib	int pipe = intel_crtc->pipe;
6436235783Skib	bool visible = base != 0;
6437235783Skib
6438235783Skib	if (intel_crtc->cursor_visible != visible) {
6439235783Skib		uint32_t cntl = I915_READ(CURCNTR_IVB(pipe));
6440235783Skib		if (base) {
6441235783Skib			cntl &= ~CURSOR_MODE;
6442235783Skib			cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
6443235783Skib		} else {
6444235783Skib			cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
6445235783Skib			cntl |= CURSOR_MODE_DISABLE;
6446235783Skib		}
6447235783Skib		I915_WRITE(CURCNTR_IVB(pipe), cntl);
6448235783Skib
6449235783Skib		intel_crtc->cursor_visible = visible;
6450235783Skib	}
6451235783Skib	/* and commit changes on next vblank */
6452235783Skib	I915_WRITE(CURBASE_IVB(pipe), base);
6453235783Skib}
6454235783Skib
/* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
static void intel_crtc_update_cursor(struct drm_crtc *crtc,
				     bool on)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int x = intel_crtc->cursor_x;
	int y = intel_crtc->cursor_y;
	u32 base, pos;
	bool visible;

	pos = 0;

	/* base == 0 means "hidden".  Hide the cursor when it has moved
	 * past the framebuffer edge or the crtc is off. */
	if (on && crtc->enabled && crtc->fb) {
		base = intel_crtc->cursor_addr;
		if (x > (int) crtc->fb->width)
			base = 0;

		if (y > (int) crtc->fb->height)
			base = 0;
	} else
		base = 0;

	/* The position registers use sign-magnitude encoding; also hide
	 * the cursor once it is entirely off the left/top edge. */
	if (x < 0) {
		if (x + intel_crtc->cursor_width < 0)
			base = 0;

		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
		x = -x;
	}
	pos |= x << CURSOR_X_SHIFT;

	if (y < 0) {
		if (y + intel_crtc->cursor_height < 0)
			base = 0;

		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
		y = -y;
	}
	pos |= y << CURSOR_Y_SHIFT;

	visible = base != 0;
	/* Hidden before and hidden after: skip the register writes. */
	if (!visible && !intel_crtc->cursor_visible)
		return;

	/* Dispatch to the per-generation cursor programming routine. */
	if (IS_IVYBRIDGE(dev)) {
		I915_WRITE(CURPOS_IVB(pipe), pos);
		ivb_update_cursor(crtc, base);
	} else {
		I915_WRITE(CURPOS(pipe), pos);
		if (IS_845G(dev) || IS_I865G(dev))
			i845_update_cursor(crtc, base);
		else
			i9xx_update_cursor(crtc, base);
	}

	if (visible)
		intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj);
}
6516235783Skib
/*
 * Legacy cursor_set entry point: attaches the GEM object named by
 * handle as the cursor image for this CRTC, or turns the cursor off
 * when handle is 0.  Only 64x64 cursors are supported.  Depending on
 * dev_priv->info->cursor_needs_physical the backing store is either
 * pinned into the GTT or attached as a physical object.  Takes over
 * the lookup reference on the object on success; the previous cursor
 * object (if any) is unpinned/detached and unreferenced.
 */
static int intel_crtc_cursor_set(struct drm_crtc *crtc,
				 struct drm_file *file,
				 uint32_t handle,
				 uint32_t width, uint32_t height)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj;
	uint32_t addr;
	int ret;

	DRM_DEBUG_KMS("\n");

	/* if we want to turn off the cursor ignore width and height */
	if (!handle) {
		DRM_DEBUG_KMS("cursor off\n");
		addr = 0;
		obj = NULL;
		DRM_LOCK(dev);
		goto finish;
	}

	/* Currently we only support 64x64 cursors */
	if (width != 64 || height != 64) {
		DRM_ERROR("we currently only support 64x64 cursors\n");
		return -EINVAL;
	}

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
	if (&obj->base == NULL)
		return -ENOENT;

	/* 64x64 at 4 bytes per pixel needs 16 KiB of backing store. */
	if (obj->base.size < width * height * 4) {
		DRM_ERROR("buffer is to small\n");
		ret = -ENOMEM;
		goto fail;
	}

	/* we only need to pin inside GTT if cursor is non-phy */
	DRM_LOCK(dev);
	if (!dev_priv->info->cursor_needs_physical) {
		if (obj->tiling_mode) {
			DRM_ERROR("cursor cannot be tiled\n");
			ret = -EINVAL;
			goto fail_locked;
		}

		ret = i915_gem_object_pin_to_display_plane(obj, 0, NULL);
		if (ret) {
			DRM_ERROR("failed to move cursor bo into the GTT\n");
			goto fail_locked;
		}

		/* The cursor plane does not use a fence register. */
		ret = i915_gem_object_put_fence(obj);
		if (ret) {
			DRM_ERROR("failed to release fence for cursor\n");
			goto fail_unpin;
		}

		addr = obj->gtt_offset;
	} else {
		int align = IS_I830(dev) ? 16 * 1024 : 256;
		ret = i915_gem_attach_phys_object(dev, obj,
						  (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
						  align);
		if (ret) {
			DRM_ERROR("failed to attach phys object\n");
			goto fail_locked;
		}
		addr = obj->phys_obj->handle->busaddr;
	}

	if (IS_GEN2(dev))
		I915_WRITE(CURSIZE, (height << 12) | width);

	/* Common tail (lock held): release the previous cursor object. */
 finish:
	if (intel_crtc->cursor_bo) {
		if (dev_priv->info->cursor_needs_physical) {
			if (intel_crtc->cursor_bo != obj)
				i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
		} else
			i915_gem_object_unpin(intel_crtc->cursor_bo);
		drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
	}

	DRM_UNLOCK(dev);

	intel_crtc->cursor_addr = addr;
	intel_crtc->cursor_bo = obj;
	intel_crtc->cursor_width = width;
	intel_crtc->cursor_height = height;

	intel_crtc_update_cursor(crtc, true);

	return 0;
fail_unpin:
	i915_gem_object_unpin(obj);
fail_locked:
	DRM_UNLOCK(dev);
fail:
	drm_gem_object_unreference_unlocked(&obj->base);
	return ret;
}
6621235783Skib
6622235783Skibstatic int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
6623235783Skib{
6624235783Skib	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6625235783Skib
6626235783Skib	intel_crtc->cursor_x = x;
6627235783Skib	intel_crtc->cursor_y = y;
6628235783Skib
6629235783Skib	intel_crtc_update_cursor(crtc, true);
6630235783Skib
6631235783Skib	return 0;
6632235783Skib}
6633235783Skib
6634235783Skib/** Sets the color ramps on behalf of RandR */
6635235783Skibvoid intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
6636235783Skib				 u16 blue, int regno)
6637235783Skib{
6638235783Skib	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6639235783Skib
6640235783Skib	intel_crtc->lut_r[regno] = red >> 8;
6641235783Skib	intel_crtc->lut_g[regno] = green >> 8;
6642235783Skib	intel_crtc->lut_b[regno] = blue >> 8;
6643235783Skib}
6644235783Skib
6645235783Skibvoid intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
6646235783Skib			     u16 *blue, int regno)
6647235783Skib{
6648235783Skib	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6649235783Skib
6650235783Skib	*red = intel_crtc->lut_r[regno] << 8;
6651235783Skib	*green = intel_crtc->lut_g[regno] << 8;
6652235783Skib	*blue = intel_crtc->lut_b[regno] << 8;
6653235783Skib}
6654235783Skib
6655235783Skibstatic void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
6656235783Skib				 u16 *blue, uint32_t start, uint32_t size)
6657235783Skib{
6658235783Skib	int end = (start + size > 256) ? 256 : start + size, i;
6659235783Skib	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6660235783Skib
6661235783Skib	for (i = start; i < end; i++) {
6662235783Skib		intel_crtc->lut_r[i] = red[i] >> 8;
6663235783Skib		intel_crtc->lut_g[i] = green[i] >> 8;
6664235783Skib		intel_crtc->lut_b[i] = blue[i] >> 8;
6665235783Skib	}
6666235783Skib
6667235783Skib	intel_crtc_load_lut(crtc);
6668235783Skib}
6669235783Skib
6670235783Skib/**
6671235783Skib * Get a pipe with a simple mode set on it for doing load-based monitor
6672235783Skib * detection.
6673235783Skib *
6674235783Skib * It will be up to the load-detect code to adjust the pipe as appropriate for
6675235783Skib * its requirements.  The pipe will be connected to no other encoders.
6676235783Skib *
6677235783Skib * Currently this code will only succeed if there is a pipe with no encoders
6678235783Skib * configured for it.  In the future, it could choose to temporarily disable
6679235783Skib * some outputs to free up a pipe for its use.
6680235783Skib *
6681235783Skib * \return crtc, or NULL if no pipes are available.
6682235783Skib */
6683235783Skib
/* VESA 640x480x72Hz mode to set on the pipe */
/* Fallback mode used by load-detection when the caller supplies none. */
static struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
6689235783Skib
/*
 * Allocates a struct intel_framebuffer around obj and initializes it
 * from mode_cmd.  On success returns 0 and stores the embedded
 * drm_framebuffer in *res; ownership of the obj reference passes to
 * the framebuffer.  On failure the reference on obj is dropped, the
 * allocation is freed, and the intel_framebuffer_init() error is
 * returned.
 */
static int
intel_framebuffer_create(struct drm_device *dev,
    struct drm_mode_fb_cmd2 *mode_cmd, struct drm_i915_gem_object *obj,
     struct drm_framebuffer **res)
{
	struct intel_framebuffer *intel_fb;
	int ret;

	/* M_WAITOK: this allocation sleeps instead of failing. */
	intel_fb = malloc(sizeof(*intel_fb), DRM_MEM_KMS, M_WAITOK | M_ZERO);
	ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
	if (ret) {
		drm_gem_object_unreference_unlocked(&obj->base);
		free(intel_fb, DRM_MEM_KMS);
		return (ret);
	}

	*res = &intel_fb->base;
	return (0);
}
6709235783Skib
6710235783Skibstatic u32
6711235783Skibintel_framebuffer_pitch_for_width(int width, int bpp)
6712235783Skib{
6713235783Skib	u32 pitch = howmany(width * bpp, 8);
6714235783Skib	return roundup2(pitch, 64);
6715235783Skib}
6716235783Skib
6717235783Skibstatic u32
6718235783Skibintel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
6719235783Skib{
6720235783Skib	u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
6721235783Skib	return roundup2(pitch * mode->vdisplay, PAGE_SIZE);
6722235783Skib}
6723235783Skib
6724235783Skibstatic int
6725235783Skibintel_framebuffer_create_for_mode(struct drm_device *dev,
6726235783Skib    struct drm_display_mode *mode, int depth, int bpp,
6727235783Skib    struct drm_framebuffer **res)
6728235783Skib{
6729235783Skib	struct drm_i915_gem_object *obj;
6730235783Skib	struct drm_mode_fb_cmd2 mode_cmd;
6731235783Skib
6732235783Skib	obj = i915_gem_alloc_object(dev,
6733235783Skib				    intel_framebuffer_size_for_mode(mode, bpp));
6734235783Skib	if (obj == NULL)
6735235783Skib		return (-ENOMEM);
6736235783Skib
6737235783Skib	mode_cmd.width = mode->hdisplay;
6738235783Skib	mode_cmd.height = mode->vdisplay;
6739235783Skib	mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
6740235783Skib								bpp);
6741235783Skib	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
6742235783Skib
6743235783Skib	return (intel_framebuffer_create(dev, &mode_cmd, obj, res));
6744235783Skib}
6745235783Skib
6746235783Skibstatic int
6747235783Skibmode_fits_in_fbdev(struct drm_device *dev,
6748235783Skib    struct drm_display_mode *mode, struct drm_framebuffer **res)
6749235783Skib{
6750235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
6751235783Skib	struct drm_i915_gem_object *obj;
6752235783Skib	struct drm_framebuffer *fb;
6753235783Skib
6754235783Skib	if (dev_priv->fbdev == NULL) {
6755235783Skib		*res = NULL;
6756235783Skib		return (0);
6757235783Skib	}
6758235783Skib
6759235783Skib	obj = dev_priv->fbdev->ifb.obj;
6760235783Skib	if (obj == NULL) {
6761235783Skib		*res = NULL;
6762235783Skib		return (0);
6763235783Skib	}
6764235783Skib
6765235783Skib	fb = &dev_priv->fbdev->ifb.base;
6766235783Skib	if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
6767235783Skib	    fb->bits_per_pixel)) {
6768235783Skib		*res = NULL;
6769235783Skib		return (0);
6770235783Skib	}
6771235783Skib
6772235783Skib	if (obj->base.size < mode->vdisplay * fb->pitches[0]) {
6773235783Skib		*res = NULL;
6774235783Skib		return (0);
6775235783Skib	}
6776235783Skib
6777235783Skib	*res = fb;
6778235783Skib	return (0);
6779235783Skib}
6780235783Skib
/*
 * Acquires a CRTC for load-based monitor detection on the given
 * encoder/connector pair.  If the encoder already has a CRTC assigned,
 * that CRTC is reused (forced on if necessary); otherwise the first
 * unused CRTC the encoder can drive is grabbed and the requested mode
 * (or load_detect_mode) is set on it, using the fbdev framebuffer when
 * it fits or a temporary one otherwise.  State needed to undo all of
 * this is saved in *old for intel_release_load_detect_pipe().
 * Returns true when a pipe is ready for detection.
 */
bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
				struct drm_connector *connector,
				struct drm_display_mode *mode,
				struct intel_load_detect_pipe *old)
{
	struct intel_crtc *intel_crtc;
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_framebuffer *old_fb;
	int i = -1, r;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, drm_get_connector_name(connector),
		      encoder->base.id, drm_get_encoder_name(encoder));

	/*
	 * Algorithm gets a little messy:
	 *
	 *   - if the connector already has an assigned crtc, use it (but make
	 *     sure it's on first)
	 *
	 *   - try to find the first unused crtc that can drive this connector,
	 *     and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (encoder->crtc) {
		crtc = encoder->crtc;

		intel_crtc = to_intel_crtc(crtc);
		old->dpms_mode = intel_crtc->dpms_mode;
		old->load_detect_temp = false;

		/* Make sure the crtc and connector are running */
		if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
			struct drm_encoder_helper_funcs *encoder_funcs;
			struct drm_crtc_helper_funcs *crtc_funcs;

			crtc_funcs = crtc->helper_private;
			crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);

			encoder_funcs = encoder->helper_private;
			encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
		}

		return true;
	}

	/* Find an unused one (if possible) */
	list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) {
		i++;
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;
		if (!possible_crtc->enabled) {
			crtc = possible_crtc;
			break;
		}
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		return false;
	}

	encoder->crtc = crtc;
	connector->encoder = encoder;

	intel_crtc = to_intel_crtc(crtc);
	old->dpms_mode = intel_crtc->dpms_mode;
	old->load_detect_temp = true;
	old->release_fb = NULL;

	if (!mode)
		mode = &load_detect_mode;

	old_fb = crtc->fb;

	/* We need a framebuffer large enough to accommodate all accesses
	 * that the plane may generate whilst we perform load detection.
	 * We can not rely on the fbcon either being present (we get called
	 * during its initialisation to detect all boot displays, or it may
	 * not even exist) or that it is large enough to satisfy the
	 * requested mode.
	 */
	r = mode_fits_in_fbdev(dev, mode, &crtc->fb);
	if (crtc->fb == NULL) {
		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
		r = intel_framebuffer_create_for_mode(dev, mode, 24, 32,
		    &crtc->fb);
		old->release_fb = crtc->fb;
	} else
		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
	if (r != 0) {
		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
		crtc->fb = old_fb;
		return false;
	}

	/* Failed modeset: destroy any temporary fb and restore the old one. */
	if (!drm_crtc_helper_set_mode(crtc, mode, 0, 0, old_fb)) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		if (old->release_fb)
			old->release_fb->funcs->destroy(old->release_fb);
		crtc->fb = old_fb;
		return false;
	}

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev, intel_crtc->pipe);

	return true;
}
6897235783Skib
/*
 * Undoes intel_get_load_detect_pipe(): for a temporarily grabbed pipe,
 * detaches the connector, disables the now-unused functions and frees
 * any temporary framebuffer; for a borrowed, already-assigned CRTC,
 * restores the saved DPMS state.
 */
void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
				    struct drm_connector *connector,
				    struct intel_load_detect_pipe *old)
{
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_device *dev = encoder->dev;
	struct drm_crtc *crtc = encoder->crtc;
	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, drm_get_connector_name(connector),
		      encoder->base.id, drm_get_encoder_name(encoder));

	if (old->load_detect_temp) {
		connector->encoder = NULL;
		drm_helper_disable_unused_functions(dev);

		if (old->release_fb)
			old->release_fb->funcs->destroy(old->release_fb);

		return;
	}

	/* Switch crtc and encoder back off if necessary */
	if (old->dpms_mode != DRM_MODE_DPMS_ON) {
		encoder_funcs->dpms(encoder, old->dpms_mode);
		crtc_funcs->dpms(crtc, old->dpms_mode);
	}
}
6928235783Skib
/* Returns the clock of the currently programmed mode of the given pipe. */
/*
 * Reads back the DPLL and FP divisor registers and reconstructs the
 * dot clock via intel_clock().  Returns 0 if the DPLL mode cannot be
 * decoded.
 */
static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 dpll = I915_READ(DPLL(pipe));
	u32 fp;
	intel_clock_t clock;

	/* Read divisors from whichever FP register the DPLL selected. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = I915_READ(FP0(pipe));
	else
		fp = I915_READ(FP1(pipe));

	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev)) {
		/* Pineview stores N one-hot; ffs() recovers the value. */
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN2(dev)) {
		/* P1 is also stored one-hot on these parts. */
		if (IS_PINEVIEW(dev))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
			       DPLL_FPA01_P1_POST_DIV_SHIFT);

		/* P2 depends on the DPLL operating mode. */
		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return 0;
		}

		/* XXX: Handle the 100Mhz refclk */
		intel_clock(dev, 96000, &clock);
	} else {
		bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);
			clock.p2 = 14;

			if ((dpll & PLL_REF_INPUT_MASK) ==
			    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
				/* XXX: might not be 66MHz */
				intel_clock(dev, 66000, &clock);
			} else
				intel_clock(dev, 48000, &clock);
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;

			intel_clock(dev, 48000, &clock);
		}
	}

	/* XXX: It would be nice to validate the clocks, but we can't reuse
	 * i830PllIsValid() because it relies on the xf86_config connector
	 * configuration being accurate, which it isn't necessarily.
	 */

	return clock.dot;
}
7015235783Skib
7016235783Skib/** Returns the currently programmed mode of the given pipe. */
7017235783Skibstruct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
7018235783Skib					     struct drm_crtc *crtc)
7019235783Skib{
7020235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
7021235783Skib	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7022235783Skib	int pipe = intel_crtc->pipe;
7023235783Skib	struct drm_display_mode *mode;
7024235783Skib	int htot = I915_READ(HTOTAL(pipe));
7025235783Skib	int hsync = I915_READ(HSYNC(pipe));
7026235783Skib	int vtot = I915_READ(VTOTAL(pipe));
7027235783Skib	int vsync = I915_READ(VSYNC(pipe));
7028235783Skib
7029235783Skib	mode = malloc(sizeof(*mode), DRM_MEM_KMS, M_WAITOK | M_ZERO);
7030235783Skib
7031235783Skib	mode->clock = intel_crtc_clock_get(dev, crtc);
7032235783Skib	mode->hdisplay = (htot & 0xffff) + 1;
7033235783Skib	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
7034235783Skib	mode->hsync_start = (hsync & 0xffff) + 1;
7035235783Skib	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
7036235783Skib	mode->vdisplay = (vtot & 0xffff) + 1;
7037235783Skib	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
7038235783Skib	mode->vsync_start = (vsync & 0xffff) + 1;
7039235783Skib	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
7040235783Skib
7041235783Skib	drm_mode_set_name(mode);
7042235783Skib	drm_mode_set_crtcinfo(mode, 0);
7043235783Skib
7044235783Skib	return mode;
7045235783Skib}
7046235783Skib
7047235783Skib#define GPU_IDLE_TIMEOUT (500 /* ms */ * 1000 / hz)
7048235783Skib
7049235783Skib/* When this timer fires, we've been idle for awhile */
7050235783Skibstatic void intel_gpu_idle_timer(void *arg)
7051235783Skib{
7052235783Skib	struct drm_device *dev = arg;
7053235783Skib	drm_i915_private_t *dev_priv = dev->dev_private;
7054235783Skib
7055235783Skib	if (!list_empty(&dev_priv->mm.active_list)) {
7056235783Skib		/* Still processing requests, so just re-arm the timer. */
7057235783Skib		callout_schedule(&dev_priv->idle_callout, GPU_IDLE_TIMEOUT);
7058235783Skib		return;
7059235783Skib	}
7060235783Skib
7061235783Skib	dev_priv->busy = false;
7062235783Skib	taskqueue_enqueue(dev_priv->tq, &dev_priv->idle_task);
7063235783Skib}
7064235783Skib
#define CRTC_IDLE_TIMEOUT (1000 /* ms */ * 1000 / hz)

/*
 * Callout handler armed by intel_increase_pllclock()/intel_mark_busy():
 * fires once this CRTC's framebuffer has been idle for CRTC_IDLE_TIMEOUT
 * ticks, then queues the idle task to consider downclocking.
 */
static void intel_crtc_idle_timer(void *arg)
{
	struct intel_crtc *intel_crtc = arg;
	struct drm_crtc *crtc = &intel_crtc->base;
	drm_i915_private_t *dev_priv = crtc->dev->dev_private;
	struct intel_framebuffer *intel_fb;

	/* NOTE(review): to_intel_framebuffer() looks like a cast/container
	 * conversion; the NULL check below only protects us if a NULL
	 * crtc->fb maps to a NULL intel_fb -- confirm. */
	intel_fb = to_intel_framebuffer(crtc->fb);
	if (intel_fb && intel_fb->obj->active) {
		/* The framebuffer is still being accessed by the GPU;
		 * stay "busy" and re-arm the callout. */
		callout_schedule(&intel_crtc->idle_callout, CRTC_IDLE_TIMEOUT);
		return;
	}

	/* Idle: clear the busy flag and let intel_idle_update() decide
	 * whether to downclock. */
	intel_crtc->busy = false;
	taskqueue_enqueue(dev_priv->tq, &dev_priv->idle_task);
}
7084235783Skib
/*
 * Restore the full LVDS pixel clock on a non-PCH platform and arm the
 * CRTC idle callout so the clock is dropped again after a period of
 * inactivity.  No-op unless an LVDS downclock mode is available.
 */
static void intel_increase_pllclock(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int dpll_reg = DPLL(pipe);
	int dpll;

	/* PCH platforms are handled elsewhere. */
	if (HAS_PCH_SPLIT(dev))
		return;

	if (!dev_priv->lvds_downclock_avail)
		return;

	dpll = I915_READ(dpll_reg);
	if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
		DRM_DEBUG_DRIVER("upclocking LVDS\n");

		/* Panel registers must be unlocked before the DPLL
		 * rate-select bit may be rewritten. */
		assert_panel_unlocked(dev_priv, pipe);

		/* Clear the rate-select bit to go back to the full-rate
		 * (FP0) divisors, then wait a vblank for it to latch. */
		dpll &= ~DISPLAY_RATE_SELECT_FPA1;
		I915_WRITE(dpll_reg, dpll);
		intel_wait_for_vblank(dev, pipe);

		/* Read back to verify the bit actually cleared. */
		dpll = I915_READ(dpll_reg);
		if (dpll & DISPLAY_RATE_SELECT_FPA1)
			DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
	}

	/* Schedule downclock */
	callout_reset(&intel_crtc->idle_callout, CRTC_IDLE_TIMEOUT,
	    intel_crtc_idle_timer, intel_crtc);
}
7119235783Skib
/*
 * Switch a non-PCH LVDS panel to its reduced (downclocked) pixel clock.
 * Called from the idle task once a CRTC has been idle long enough;
 * no-op unless a downclock mode is available for this CRTC.
 */
static void intel_decrease_pllclock(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/* PCH platforms are handled elsewhere. */
	if (HAS_PCH_SPLIT(dev))
		return;

	if (!dev_priv->lvds_downclock_avail)
		return;

	/*
	 * Since this is called by a timer, we should never get here in
	 * the manual case.
	 */
	if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
		int pipe = intel_crtc->pipe;
		int dpll_reg = DPLL(pipe);
		u32 dpll;

		DRM_DEBUG_DRIVER("downclocking LVDS\n");

		/* Panel registers must be unlocked before the DPLL
		 * rate-select bit may be rewritten. */
		assert_panel_unlocked(dev_priv, pipe);

		/* Select the reduced (FP1) divisors and wait a vblank for
		 * the change to take effect, then verify. */
		dpll = I915_READ(dpll_reg);
		dpll |= DISPLAY_RATE_SELECT_FPA1;
		I915_WRITE(dpll_reg, dpll);
		intel_wait_for_vblank(dev, pipe);
		dpll = I915_READ(dpll_reg);
		if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
			DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
	}
}
7154235783Skib
7155235783Skib/**
7156235783Skib * intel_idle_update - adjust clocks for idleness
7157235783Skib * @work: work struct
7158235783Skib *
7159235783Skib * Either the GPU or display (or both) went idle.  Check the busy status
7160235783Skib * here and adjust the CRTC and GPU clocks as necessary.
7161235783Skib */
7162235783Skibstatic void intel_idle_update(void *arg, int pending)
7163235783Skib{
7164235783Skib	drm_i915_private_t *dev_priv = arg;
7165235783Skib	struct drm_device *dev = dev_priv->dev;
7166235783Skib	struct drm_crtc *crtc;
7167235783Skib	struct intel_crtc *intel_crtc;
7168235783Skib
7169235783Skib	if (!i915_powersave)
7170235783Skib		return;
7171235783Skib
7172235783Skib	DRM_LOCK(dev);
7173235783Skib
7174235783Skib	i915_update_gfx_val(dev_priv);
7175235783Skib
7176235783Skib	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7177235783Skib		/* Skip inactive CRTCs */
7178235783Skib		if (!crtc->fb)
7179235783Skib			continue;
7180235783Skib
7181235783Skib		intel_crtc = to_intel_crtc(crtc);
7182235783Skib		if (!intel_crtc->busy)
7183235783Skib			intel_decrease_pllclock(crtc);
7184235783Skib	}
7185235783Skib
7186235783Skib	DRM_UNLOCK(dev);
7187235783Skib}
7188235783Skib
/**
 * intel_mark_busy - mark the GPU and possibly the display busy
 * @dev: drm device
 * @obj: object we're operating on
 *
 * Callers can use this function to indicate that the GPU is busy processing
 * commands.  If @obj matches one of the CRTC objects (i.e. it's a scanout
 * buffer), we'll also mark the display as busy, so we know to increase its
 * clock frequency.
 */
void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = NULL;
	struct intel_framebuffer *intel_fb;
	struct intel_crtc *intel_crtc;

	/* Clock management only applies under kernel modesetting. */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	/* First busy notification only sets the flag; subsequent ones push
	 * the GPU idle callout further out.  NOTE(review): the idle callout
	 * is apparently expected to be armed elsewhere on the not-busy ->
	 * busy transition -- confirm, otherwise the very first transition
	 * never starts the timer. */
	if (!dev_priv->busy)
		dev_priv->busy = true;
	else
		callout_reset(&dev_priv->idle_callout, GPU_IDLE_TIMEOUT,
		    intel_gpu_idle_timer, dev);

	/* If @obj is the scanout buffer of some CRTC, upclock that CRTC on
	 * the idle->busy transition, or extend its idle timer if it is
	 * already busy. */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		if (!crtc->fb)
			continue;

		intel_crtc = to_intel_crtc(crtc);
		intel_fb = to_intel_framebuffer(crtc->fb);
		if (intel_fb->obj == obj) {
			if (!intel_crtc->busy) {
				/* Non-busy -> busy, upclock */
				intel_increase_pllclock(crtc);
				intel_crtc->busy = true;
			} else {
				/* Busy -> busy, put off timer */
				callout_reset(&intel_crtc->idle_callout,
				    CRTC_IDLE_TIMEOUT, intel_crtc_idle_timer,
				    intel_crtc);
			}
		}
	}
}
7235235783Skib
/*
 * drm_crtc_funcs.destroy hook: tear down any pending page-flip unpin work,
 * detach the CRTC from the mode configuration and free it.
 */
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_unpin_work *work;

	/* Steal the pending unpin work (if any) under the event lock so
	 * the flip-completion path cannot race us for it. */
	mtx_lock(&dev->event_lock);
	work = intel_crtc->unpin_work;
	intel_crtc->unpin_work = NULL;
	mtx_unlock(&dev->event_lock);

	if (work) {
		/* Cancel the task if not yet running, then wait out any
		 * instance that already started before freeing. */
		taskqueue_cancel(dev_priv->tq, &work->task, NULL);
		taskqueue_drain(dev_priv->tq, &work->task);
		free(work, DRM_MEM_KMS);
	}

	drm_crtc_cleanup(crtc);

	free(intel_crtc, DRM_MEM_KMS);
}
7258235783Skib
7259235783Skibstatic void intel_unpin_work_fn(void *arg, int pending)
7260235783Skib{
7261235783Skib	struct intel_unpin_work *work = arg;
7262235783Skib	struct drm_device *dev;
7263235783Skib
7264235783Skib	dev = work->dev;
7265235783Skib	DRM_LOCK(dev);
7266235783Skib	intel_unpin_fb_obj(work->old_fb_obj);
7267235783Skib	drm_gem_object_unreference(&work->pending_flip_obj->base);
7268235783Skib	drm_gem_object_unreference(&work->old_fb_obj->base);
7269235783Skib
7270235783Skib	intel_update_fbc(work->dev);
7271235783Skib	DRM_UNLOCK(dev);
7272235783Skib	free(work, DRM_MEM_KMS);
7273235783Skib}
7274235783Skib
/*
 * Common page-flip completion handler, called from the vblank interrupt
 * path via intel_finish_page_flip{,_plane}(): delivers the completion
 * event to userland (correcting the timestamp if we ran late), drops the
 * vblank reference, clears the pending-flip bit on the old scanout object
 * and queues the deferred unpin work.
 */
static void do_intel_finish_page_flip(struct drm_device *dev,
				      struct drm_crtc *crtc)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work;
	struct drm_i915_gem_object *obj;
	struct drm_pending_vblank_event *e;
	struct timeval tnow, tvbl;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	microtime(&tnow);

	/* Claim the pending work under the event lock; nothing to do if no
	 * flip was queued or it was never latched by
	 * intel_prepare_page_flip(). */
	mtx_lock(&dev->event_lock);
	work = intel_crtc->unpin_work;
	if (work == NULL || !work->pending) {
		mtx_unlock(&dev->event_lock);
		return;
	}

	intel_crtc->unpin_work = NULL;

	if (work->event) {
		e = work->event;
		e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl);

		/* Called before vblank count and timestamps have
		 * been updated for the vblank interval of flip
		 * completion? Need to increment vblank count and
		 * add one videorefresh duration to returned timestamp
		 * to account for this. We assume this happened if we
		 * get called over 0.9 frame durations after the last
		 * timestamped vblank.
		 *
		 * This calculation can not be used with vrefresh rates
		 * below 5Hz (10Hz to be on the safe side) without
		 * promoting to 64 integers.
		 */
		if (10 * (timeval_to_ns(&tnow) - timeval_to_ns(&tvbl)) >
		    9 * crtc->framedur_ns) {
			e->event.sequence++;
			tvbl = ns_to_timeval(timeval_to_ns(&tvbl) +
					     crtc->framedur_ns);
		}

		e->event.tv_sec = tvbl.tv_sec;
		e->event.tv_usec = tvbl.tv_usec;

		/* Hand the event to the owning file's queue and wake any
		 * poller waiting on it. */
		list_add_tail(&e->base.link,
			      &e->base.file_priv->event_list);
		drm_event_wakeup(&e->base);
	}

	drm_vblank_put(dev, intel_crtc->pipe);

	obj = work->old_fb_obj;

	/* Clear this plane's pending-flip bit; wake waiters once no plane
	 * has a flip outstanding against the object. */
	atomic_clear_int(&obj->pending_flip, 1 << intel_crtc->plane);
	if (atomic_load_acq_int(&obj->pending_flip) == 0)
		wakeup(&obj->pending_flip);
	mtx_unlock(&dev->event_lock);

	/* Unpinning needs the device lock (see intel_unpin_work_fn), so
	 * defer it to the taskqueue rather than doing it from irq context. */
	taskqueue_enqueue(dev_priv->tq, &work->task);

	CTR2(KTR_DRM, "i915_flip_complete %d %p", intel_crtc->plane,
	    work->pending_flip_obj);
}
7345235783Skib
7346235783Skibvoid intel_finish_page_flip(struct drm_device *dev, int pipe)
7347235783Skib{
7348235783Skib	drm_i915_private_t *dev_priv = dev->dev_private;
7349235783Skib	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
7350235783Skib
7351235783Skib	do_intel_finish_page_flip(dev, crtc);
7352235783Skib}
7353235783Skib
7354235783Skibvoid intel_finish_page_flip_plane(struct drm_device *dev, int plane)
7355235783Skib{
7356235783Skib	drm_i915_private_t *dev_priv = dev->dev_private;
7357235783Skib	struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
7358235783Skib
7359235783Skib	do_intel_finish_page_flip(dev, crtc);
7360235783Skib}
7361235783Skib
7362235783Skibvoid intel_prepare_page_flip(struct drm_device *dev, int plane)
7363235783Skib{
7364235783Skib	drm_i915_private_t *dev_priv = dev->dev_private;
7365235783Skib	struct intel_crtc *intel_crtc =
7366235783Skib		to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
7367235783Skib
7368235783Skib	mtx_lock(&dev->event_lock);
7369235783Skib	if (intel_crtc->unpin_work) {
7370235783Skib		if ((++intel_crtc->unpin_work->pending) > 1)
7371235783Skib			DRM_ERROR("Prepared flip multiple times\n");
7372235783Skib	} else {
7373235783Skib		DRM_DEBUG("preparing flip with no unpin work?\n");
7374235783Skib	}
7375235783Skib	mtx_unlock(&dev->event_lock);
7376235783Skib}
7377235783Skib
7378235783Skibstatic int intel_gen2_queue_flip(struct drm_device *dev,
7379235783Skib				 struct drm_crtc *crtc,
7380235783Skib				 struct drm_framebuffer *fb,
7381235783Skib				 struct drm_i915_gem_object *obj)
7382235783Skib{
7383235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
7384235783Skib	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7385235783Skib	unsigned long offset;
7386235783Skib	u32 flip_mask;
7387235783Skib	int ret;
7388235783Skib
7389235783Skib	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
7390235783Skib	if (ret)
7391235783Skib		goto out;
7392235783Skib
7393235783Skib	/* Offset into the new buffer for cases of shared fbs between CRTCs */
7394235783Skib	offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
7395235783Skib
7396235783Skib	ret = BEGIN_LP_RING(6);
7397235783Skib	if (ret)
7398235783Skib		goto out;
7399235783Skib
7400235783Skib	/* Can't queue multiple flips, so wait for the previous
7401235783Skib	 * one to finish before executing the next.
7402235783Skib	 */
7403235783Skib	if (intel_crtc->plane)
7404235783Skib		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
7405235783Skib	else
7406235783Skib		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
7407235783Skib	OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
7408235783Skib	OUT_RING(MI_NOOP);
7409235783Skib	OUT_RING(MI_DISPLAY_FLIP |
7410235783Skib		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
7411235783Skib	OUT_RING(fb->pitches[0]);
7412235783Skib	OUT_RING(obj->gtt_offset + offset);
7413235783Skib	OUT_RING(0); /* aux display base address, unused */
7414235783Skib	ADVANCE_LP_RING();
7415235783Skibout:
7416235783Skib	return ret;
7417235783Skib}
7418235783Skib
7419235783Skibstatic int intel_gen3_queue_flip(struct drm_device *dev,
7420235783Skib				 struct drm_crtc *crtc,
7421235783Skib				 struct drm_framebuffer *fb,
7422235783Skib				 struct drm_i915_gem_object *obj)
7423235783Skib{
7424235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
7425235783Skib	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7426235783Skib	unsigned long offset;
7427235783Skib	u32 flip_mask;
7428235783Skib	int ret;
7429235783Skib
7430235783Skib	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
7431235783Skib	if (ret)
7432235783Skib		goto out;
7433235783Skib
7434235783Skib	/* Offset into the new buffer for cases of shared fbs between CRTCs */
7435235783Skib	offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
7436235783Skib
7437235783Skib	ret = BEGIN_LP_RING(6);
7438235783Skib	if (ret)
7439235783Skib		goto out;
7440235783Skib
7441235783Skib	if (intel_crtc->plane)
7442235783Skib		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
7443235783Skib	else
7444235783Skib		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
7445235783Skib	OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
7446235783Skib	OUT_RING(MI_NOOP);
7447235783Skib	OUT_RING(MI_DISPLAY_FLIP_I915 |
7448235783Skib		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
7449235783Skib	OUT_RING(fb->pitches[0]);
7450235783Skib	OUT_RING(obj->gtt_offset + offset);
7451235783Skib	OUT_RING(MI_NOOP);
7452235783Skib
7453235783Skib	ADVANCE_LP_RING();
7454235783Skibout:
7455235783Skib	return ret;
7456235783Skib}
7457235783Skib
7458235783Skibstatic int intel_gen4_queue_flip(struct drm_device *dev,
7459235783Skib				 struct drm_crtc *crtc,
7460235783Skib				 struct drm_framebuffer *fb,
7461235783Skib				 struct drm_i915_gem_object *obj)
7462235783Skib{
7463235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
7464235783Skib	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7465235783Skib	uint32_t pf, pipesrc;
7466235783Skib	int ret;
7467235783Skib
7468235783Skib	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
7469235783Skib	if (ret)
7470235783Skib		goto out;
7471235783Skib
7472235783Skib	ret = BEGIN_LP_RING(4);
7473235783Skib	if (ret)
7474235783Skib		goto out;
7475235783Skib
7476235783Skib	/* i965+ uses the linear or tiled offsets from the
7477235783Skib	 * Display Registers (which do not change across a page-flip)
7478235783Skib	 * so we need only reprogram the base address.
7479235783Skib	 */
7480235783Skib	OUT_RING(MI_DISPLAY_FLIP |
7481235783Skib		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
7482235783Skib	OUT_RING(fb->pitches[0]);
7483235783Skib	OUT_RING(obj->gtt_offset | obj->tiling_mode);
7484235783Skib
7485235783Skib	/* XXX Enabling the panel-fitter across page-flip is so far
7486235783Skib	 * untested on non-native modes, so ignore it for now.
7487235783Skib	 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
7488235783Skib	 */
7489235783Skib	pf = 0;
7490235783Skib	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
7491235783Skib	OUT_RING(pf | pipesrc);
7492235783Skib	ADVANCE_LP_RING();
7493235783Skibout:
7494235783Skib	return ret;
7495235783Skib}
7496235783Skib
7497235783Skibstatic int intel_gen6_queue_flip(struct drm_device *dev,
7498235783Skib				 struct drm_crtc *crtc,
7499235783Skib				 struct drm_framebuffer *fb,
7500235783Skib				 struct drm_i915_gem_object *obj)
7501235783Skib{
7502235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
7503235783Skib	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7504235783Skib	uint32_t pf, pipesrc;
7505235783Skib	int ret;
7506235783Skib
7507235783Skib	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
7508235783Skib	if (ret)
7509235783Skib		goto out;
7510235783Skib
7511235783Skib	ret = BEGIN_LP_RING(4);
7512235783Skib	if (ret)
7513235783Skib		goto out;
7514235783Skib
7515235783Skib	OUT_RING(MI_DISPLAY_FLIP |
7516235783Skib		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
7517235783Skib	OUT_RING(fb->pitches[0] | obj->tiling_mode);
7518235783Skib	OUT_RING(obj->gtt_offset);
7519235783Skib
7520235783Skib	/* Contrary to the suggestions in the documentation,
7521235783Skib	 * "Enable Panel Fitter" does not seem to be required when page
7522235783Skib	 * flipping with a non-native mode, and worse causes a normal
7523235783Skib	 * modeset to fail.
7524235783Skib	 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
7525235783Skib	 */
7526235783Skib	pf = 0;
7527235783Skib	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
7528235783Skib	OUT_RING(pf | pipesrc);
7529235783Skib	ADVANCE_LP_RING();
7530235783Skibout:
7531235783Skib	return ret;
7532235783Skib}
7533235783Skib
7534235783Skib/*
7535235783Skib * On gen7 we currently use the blit ring because (in early silicon at least)
7536235783Skib * the render ring doesn't give us interrpts for page flip completion, which
7537235783Skib * means clients will hang after the first flip is queued.  Fortunately the
7538235783Skib * blit ring generates interrupts properly, so use it instead.
7539235783Skib */
7540235783Skibstatic int intel_gen7_queue_flip(struct drm_device *dev,
7541235783Skib				 struct drm_crtc *crtc,
7542235783Skib				 struct drm_framebuffer *fb,
7543235783Skib				 struct drm_i915_gem_object *obj)
7544235783Skib{
7545235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
7546235783Skib	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7547235783Skib	struct intel_ring_buffer *ring = &dev_priv->rings[BCS];
7548235783Skib	int ret;
7549235783Skib
7550235783Skib	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
7551235783Skib	if (ret)
7552235783Skib		goto out;
7553235783Skib
7554235783Skib	ret = intel_ring_begin(ring, 4);
7555235783Skib	if (ret)
7556235783Skib		goto out;
7557235783Skib
7558235783Skib	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19));
7559235783Skib	intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
7560235783Skib	intel_ring_emit(ring, (obj->gtt_offset));
7561235783Skib	intel_ring_emit(ring, (MI_NOOP));
7562235783Skib	intel_ring_advance(ring);
7563235783Skibout:
7564235783Skib	return ret;
7565235783Skib}
7566235783Skib
/*
 * Fallback flip hook for chipsets with no page-flip implementation;
 * unconditionally reports the operation as unsupported.
 */
static int intel_default_queue_flip(struct drm_device *dev,
				    struct drm_crtc *crtc,
				    struct drm_framebuffer *fb,
				    struct drm_i915_gem_object *obj)
{

	return (-ENODEV);
}
7574235783Skib
/*
 * drm_crtc_funcs.page_flip hook: queue an asynchronous flip of @crtc to
 * @fb.  Takes references on both the old and new framebuffer objects plus
 * a vblank reference; on success these are dropped by the flip-completion
 * and unpin-work paths, on failure they are rolled back here.  Returns 0
 * or a negative errno (-EBUSY if a flip is already pending on this CRTC).
 */
static int intel_crtc_page_flip(struct drm_crtc *crtc,
				struct drm_framebuffer *fb,
				struct drm_pending_vblank_event *event)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work;
	int ret;

	work = malloc(sizeof *work, DRM_MEM_KMS, M_WAITOK | M_ZERO);

	work->event = event;
	work->dev = crtc->dev;
	intel_fb = to_intel_framebuffer(crtc->fb);
	work->old_fb_obj = intel_fb->obj;
	TASK_INIT(&work->task, 0, intel_unpin_work_fn, work);

	/* Hold a vblank reference so the completion event can be timed. */
	ret = drm_vblank_get(dev, intel_crtc->pipe);
	if (ret)
		goto free_work;

	/* We borrow the event spin lock for protecting unpin_work */
	mtx_lock(&dev->event_lock);
	if (intel_crtc->unpin_work) {
		/* Only one flip may be outstanding per CRTC. */
		mtx_unlock(&dev->event_lock);
		free(work, DRM_MEM_KMS);
		drm_vblank_put(dev, intel_crtc->pipe);

		DRM_DEBUG("flip queue: crtc already busy\n");
		return -EBUSY;
	}
	intel_crtc->unpin_work = work;
	mtx_unlock(&dev->event_lock);

	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	DRM_LOCK(dev);

	/* Reference the objects for the scheduled work. */
	drm_gem_object_reference(&work->old_fb_obj->base);
	drm_gem_object_reference(&obj->base);

	/* NOTE(review): crtc->fb is switched to the new fb before the ring
	 * programming and is NOT restored on the error path below --
	 * confirm callers tolerate that. */
	crtc->fb = fb;

	work->pending_flip_obj = obj;

	work->enable_stall_check = true;

	/* Block clients from rendering to the new back buffer until
	 * the flip occurs and the object is no longer visible.
	 */
	atomic_set_int(&work->old_fb_obj->pending_flip, 1 << intel_crtc->plane);

	/* Hand off to the per-generation ring programming. */
	ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
	if (ret)
		goto cleanup_pending;
	intel_disable_fbc(dev);
	DRM_UNLOCK(dev);

	CTR2(KTR_DRM, "i915_flip_request %d %p", intel_crtc->plane, obj);

	return 0;

cleanup_pending:
	/* Roll back the state set up above, in reverse order. */
	atomic_clear_int(&work->old_fb_obj->pending_flip, 1 << intel_crtc->plane);
	drm_gem_object_unreference(&work->old_fb_obj->base);
	drm_gem_object_unreference(&obj->base);
	DRM_UNLOCK(dev);

	mtx_lock(&dev->event_lock);
	intel_crtc->unpin_work = NULL;
	mtx_unlock(&dev->event_lock);

	drm_vblank_put(dev, intel_crtc->pipe);
free_work:
	free(work, DRM_MEM_KMS);

	return ret;
}
7658235783Skib
7659235783Skibstatic void intel_sanitize_modesetting(struct drm_device *dev,
7660235783Skib				       int pipe, int plane)
7661235783Skib{
7662235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
7663235783Skib	u32 reg, val;
7664235783Skib
7665235783Skib	/* Clear any frame start delays used for debugging left by the BIOS */
7666235783Skib	for_each_pipe(pipe) {
7667235783Skib		reg = PIPECONF(pipe);
7668235783Skib		I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
7669235783Skib	}
7670235783Skib
7671235783Skib	if (HAS_PCH_SPLIT(dev))
7672235783Skib		return;
7673235783Skib
7674235783Skib	/* Who knows what state these registers were left in by the BIOS or
7675235783Skib	 * grub?
7676235783Skib	 *
7677235783Skib	 * If we leave the registers in a conflicting state (e.g. with the
7678235783Skib	 * display plane reading from the other pipe than the one we intend
7679235783Skib	 * to use) then when we attempt to teardown the active mode, we will
7680235783Skib	 * not disable the pipes and planes in the correct order -- leaving
7681235783Skib	 * a plane reading from a disabled pipe and possibly leading to
7682235783Skib	 * undefined behaviour.
7683235783Skib	 */
7684235783Skib
7685235783Skib	reg = DSPCNTR(plane);
7686235783Skib	val = I915_READ(reg);
7687235783Skib
7688235783Skib	if ((val & DISPLAY_PLANE_ENABLE) == 0)
7689235783Skib		return;
7690235783Skib	if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe)
7691235783Skib		return;
7692235783Skib
7693235783Skib	/* This display plane is active and attached to the other CPU pipe. */
7694235783Skib	pipe = !pipe;
7695235783Skib
7696235783Skib	/* Disable the plane and wait for it to stop reading from the pipe. */
7697235783Skib	intel_disable_plane(dev_priv, plane, pipe);
7698235783Skib	intel_disable_pipe(dev_priv, pipe);
7699235783Skib}
7700235783Skib
7701235783Skibstatic void intel_crtc_reset(struct drm_crtc *crtc)
7702235783Skib{
7703235783Skib	struct drm_device *dev = crtc->dev;
7704235783Skib	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7705235783Skib
7706235783Skib	/* Reset flags back to the 'unknown' status so that they
7707235783Skib	 * will be correctly set on the initial modeset.
7708235783Skib	 */
7709235783Skib	intel_crtc->dpms_mode = -1;
7710235783Skib
7711235783Skib	/* We need to fix up any BIOS configuration that conflicts with
7712235783Skib	 * our expectations.
7713235783Skib	 */
7714235783Skib	intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
7715235783Skib}
7716235783Skib
/*
 * CRTC helper callbacks shared by every CRTC.  Deliberately not const:
 * the .prepare/.commit members are filled in per-platform by
 * intel_crtc_init().
 */
static struct drm_crtc_helper_funcs intel_helper_funcs = {
	.dpms = intel_crtc_dpms,
	.mode_fixup = intel_crtc_mode_fixup,
	.mode_set = intel_crtc_mode_set,
	.mode_set_base = intel_pipe_set_base,
	.mode_set_base_atomic = intel_pipe_set_base_atomic,
	.load_lut = intel_crtc_load_lut,
	.disable = intel_crtc_disable,
};
7726235783Skib
/* Core DRM entry points for i915 CRTCs. */
static const struct drm_crtc_funcs intel_crtc_funcs = {
	.reset = intel_crtc_reset,
	.cursor_set = intel_crtc_cursor_set,
	.cursor_move = intel_crtc_cursor_move,
	.gamma_set = intel_crtc_gamma_set,
	.set_config = drm_crtc_helper_set_config,
	.destroy = intel_crtc_destroy,
	.page_flip = intel_crtc_page_flip,
};
7736235783Skib
/*
 * Allocate and register the CRTC for @pipe: initialize an identity gamma
 * LUT, pick the display plane (swapped on mobile gen3 for FBC), record
 * the plane/pipe -> crtc mappings used by the interrupt code, and hook up
 * the per-platform helper callbacks.
 */
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc;
	int i;

	/* Trailing space for INTELFB_CONN_LIMIT connector pointers. */
	intel_crtc = malloc(sizeof(struct intel_crtc) +
	    (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)),
	    DRM_MEM_KMS, M_WAITOK | M_ZERO);

	drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);

	/* Start with an identity gamma ramp. */
	drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
	for (i = 0; i < 256; i++) {
		intel_crtc->lut_r[i] = i;
		intel_crtc->lut_g[i] = i;
		intel_crtc->lut_b[i] = i;
	}

	/* Swap pipes & planes for FBC on pre-965 */
	intel_crtc->pipe = pipe;
	intel_crtc->plane = pipe;
	if (IS_MOBILE(dev) && IS_GEN3(dev)) {
		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
		intel_crtc->plane = !pipe;
	}

	KASSERT(pipe < DRM_ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) &&
	    dev_priv->plane_to_crtc_mapping[intel_crtc->plane] == NULL,
	    ("plane_to_crtc is already initialized"));
	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;

	intel_crtc_reset(&intel_crtc->base);
	intel_crtc->active = true; /* force the pipe off on setup_init_config */
	intel_crtc->bpp = 24; /* default for pre-Ironlake */

	/* NOTE(review): this mutates the file-wide intel_helper_funcs, so
	 * every CRTC ends up sharing the last platform's prepare/commit
	 * hooks -- fine only while a device is uniformly PCH or non-PCH. */
	if (HAS_PCH_SPLIT(dev)) {
		if (pipe == 2 && IS_IVYBRIDGE(dev))
			intel_crtc->no_pll = true;
		intel_helper_funcs.prepare = ironlake_crtc_prepare;
		intel_helper_funcs.commit = ironlake_crtc_commit;
	} else {
		intel_helper_funcs.prepare = i9xx_crtc_prepare;
		intel_helper_funcs.commit = i9xx_crtc_commit;
	}

	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);

	intel_crtc->busy = false;

	callout_init(&intel_crtc->idle_callout, CALLOUT_MPSAFE);
}
7790235783Skib
7791235783Skibint intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
7792235783Skib				struct drm_file *file)
7793235783Skib{
7794235783Skib	drm_i915_private_t *dev_priv = dev->dev_private;
7795235783Skib	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
7796235783Skib	struct drm_mode_object *drmmode_obj;
7797235783Skib	struct intel_crtc *crtc;
7798235783Skib
7799235783Skib	if (!dev_priv) {
7800235783Skib		DRM_ERROR("called with no initialization\n");
7801235783Skib		return -EINVAL;
7802235783Skib	}
7803235783Skib
7804235783Skib	drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
7805235783Skib			DRM_MODE_OBJECT_CRTC);
7806235783Skib
7807235783Skib	if (!drmmode_obj) {
7808235783Skib		DRM_ERROR("no such CRTC id\n");
7809235783Skib		return -EINVAL;
7810235783Skib	}
7811235783Skib
7812235783Skib	crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
7813235783Skib	pipe_from_crtc_id->pipe = crtc->pipe;
7814235783Skib
7815235783Skib	return 0;
7816235783Skib}
7817235783Skib
7818235783Skibstatic int intel_encoder_clones(struct drm_device *dev, int type_mask)
7819235783Skib{
7820235783Skib	struct intel_encoder *encoder;
7821235783Skib	int index_mask = 0;
7822235783Skib	int entry = 0;
7823235783Skib
7824235783Skib	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
7825235783Skib		if (type_mask & encoder->clone_mask)
7826235783Skib			index_mask |= (1 << entry);
7827235783Skib		entry++;
7828235783Skib	}
7829235783Skib
7830235783Skib	return index_mask;
7831235783Skib}
7832235783Skib
7833235783Skibstatic bool has_edp_a(struct drm_device *dev)
7834235783Skib{
7835235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
7836235783Skib
7837235783Skib	if (!IS_MOBILE(dev))
7838235783Skib		return false;
7839235783Skib
7840235783Skib	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
7841235783Skib		return false;
7842235783Skib
7843235783Skib	if (IS_GEN5(dev) &&
7844235783Skib	    (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE))
7845235783Skib		return false;
7846235783Skib
7847235783Skib	return true;
7848235783Skib}
7849235783Skib
/*
 * Probe the display connectors present on this device and register an
 * encoder/connector pair for each.  The probe order (eDP, CRT, then
 * digital ports) and the per-port detect-register reads follow the
 * hardware's strapping rules and must not be reordered.
 */
static void intel_setup_outputs(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;
	bool has_lvds;

	has_lvds = intel_lvds_init(dev);
	if (!has_lvds && !HAS_PCH_SPLIT(dev)) {
		/* disable the panel fitter on everything but LVDS */
		I915_WRITE(PFIT_CONTROL, 0);
	}

	if (HAS_PCH_SPLIT(dev)) {
		/* VBT may dedicate port D to eDP instead of regular DP. */
		dpd_is_edp = intel_dpd_is_edp(dev);

		if (has_edp_a(dev))
			intel_dp_init(dev, DP_A);

		/* eDP on port D is initialized here; plain DP D below. */
		if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
			intel_dp_init(dev, PCH_DP_D);
	}

	intel_crt_init(dev);

	if (HAS_PCH_SPLIT(dev)) {
		int found;

		DRM_DEBUG_KMS(
"HDMIB %d PCH_DP_B %d HDMIC %d HDMID %d PCH_DP_C %d PCH_DP_D %d LVDS %d\n",
		    (I915_READ(HDMIB) & PORT_DETECTED) != 0,
		    (I915_READ(PCH_DP_B) & DP_DETECTED) != 0,
		    (I915_READ(HDMIC) & PORT_DETECTED) != 0,
		    (I915_READ(HDMID) & PORT_DETECTED) != 0,
		    (I915_READ(PCH_DP_C) & DP_DETECTED) != 0,
		    (I915_READ(PCH_DP_D) & DP_DETECTED) != 0,
		    (I915_READ(PCH_LVDS) & LVDS_DETECTED) != 0);

		if (I915_READ(HDMIB) & PORT_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev, PCH_SDVOB);
			if (!found)
				intel_hdmi_init(dev, HDMIB);
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev, PCH_DP_B);
		}

		if (I915_READ(HDMIC) & PORT_DETECTED)
			intel_hdmi_init(dev, HDMIC);

		if (I915_READ(HDMID) & PORT_DETECTED)
			intel_hdmi_init(dev, HDMID);

		if (I915_READ(PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_C);

		/* Port D as plain DP, unless it was claimed as eDP above. */
		if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
			intel_dp_init(dev, PCH_DP_D);

	} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
		bool found = false;

		if (I915_READ(SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOB\n");
			found = intel_sdvo_init(dev, SDVOB);
			if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
				intel_hdmi_init(dev, SDVOB);
			}

			if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
				DRM_DEBUG_KMS("probing DP_B\n");
				intel_dp_init(dev, DP_B);
			}
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		/*
		 * NOTE(review): reading SDVOB here (not SDVOC) looks
		 * deliberate given the comment above — pre-G4X parts gate
		 * SDVOC behind SDVOB's detect bit — but confirm against the
		 * hardware documentation before "fixing" it.
		 */
		if (I915_READ(SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOC\n");
			found = intel_sdvo_init(dev, SDVOC);
		}

		if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {

			if (SUPPORTS_INTEGRATED_HDMI(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
				intel_hdmi_init(dev, SDVOC);
			}
			if (SUPPORTS_INTEGRATED_DP(dev)) {
				DRM_DEBUG_KMS("probing DP_C\n");
				intel_dp_init(dev, DP_C);
			}
		}

		if (SUPPORTS_INTEGRATED_DP(dev) &&
		    (I915_READ(DP_D) & DP_DETECTED)) {
			DRM_DEBUG_KMS("probing DP_D\n");
			intel_dp_init(dev, DP_D);
		}
	} else if (IS_GEN2(dev)) {
		/* DVO support is not ported yet on gen2. */
#if 1
		KIB_NOTYET();
#else
		intel_dvo_init(dev);
#endif
	}

	if (SUPPORTS_TV(dev))
		intel_tv_init(dev);

	/* Fill in each encoder's CRTC and clone compatibility masks. */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		encoder->base.possible_crtcs = encoder->crtc_mask;
		encoder->base.possible_clones =
			intel_encoder_clones(dev, encoder->clone_mask);
	}

	/* disable all the possible outputs/crtcs before entering KMS mode */
	drm_helper_disable_unused_functions(dev);

	if (HAS_PCH_SPLIT(dev))
		ironlake_init_pch_refclk(dev);
}
7973235783Skib
7974235783Skibstatic void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
7975235783Skib{
7976235783Skib	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
7977235783Skib
7978235783Skib	drm_framebuffer_cleanup(fb);
7979235783Skib	drm_gem_object_unreference_unlocked(&intel_fb->obj->base);
7980235783Skib
7981235783Skib	free(intel_fb, DRM_MEM_KMS);
7982235783Skib}
7983235783Skib
7984235783Skibstatic int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
7985235783Skib						struct drm_file *file,
7986235783Skib						unsigned int *handle)
7987235783Skib{
7988235783Skib	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
7989235783Skib	struct drm_i915_gem_object *obj = intel_fb->obj;
7990235783Skib
7991235783Skib	return drm_gem_handle_create(file, &obj->base, handle);
7992235783Skib}
7993235783Skib
/* Framebuffer vtable for userspace-created framebuffers. */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
};
7998235783Skib
/*
 * Validate a framebuffer request against hardware display constraints
 * and bind it to its backing GEM object.
 *
 * Rejects Y-tiled objects (the display engine scans out X-tiled or
 * linear only), pitches that are not 64-byte aligned, and pixel
 * formats the scanout hardware cannot handle.  Returns 0 on success
 * or a negative errno.
 */
int intel_framebuffer_init(struct drm_device *dev,
			   struct intel_framebuffer *intel_fb,
			   struct drm_mode_fb_cmd2 *mode_cmd,
			   struct drm_i915_gem_object *obj)
{
	int ret;

	if (obj->tiling_mode == I915_TILING_Y)
		return -EINVAL;

	/* Scanout pitch must be a multiple of 64 bytes. */
	if (mode_cmd->pitches[0] & 63)
		return -EINVAL;

	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_RGB332:
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		/* RGB formats are common across chipsets */
		break;
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_VYUY:
		/* Packed YUV variants accepted by the sprite/overlay path. */
		break;
	default:
		DRM_DEBUG_KMS("unsupported pixel format %u\n",
				mode_cmd->pixel_format);
		return -EINVAL;
	}

	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
	if (ret) {
		DRM_ERROR("framebuffer init failed %d\n", ret);
		return ret;
	}

	/* Copy geometry/format from the request and take over the object. */
	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
	intel_fb->obj = obj;
	return 0;
}
8043235783Skib
/*
 * mode_config hook: create a framebuffer from a userspace fb request.
 * Looks up the GEM object named by the first handle and wraps it; the
 * lookup's reference is inherited by the framebuffer on success.
 */
static int
intel_user_framebuffer_create(struct drm_device *dev,
    struct drm_file *filp, struct drm_mode_fb_cmd2 *mode_cmd,
    struct drm_framebuffer **res)
{
	struct drm_i915_gem_object *obj;

	obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
						mode_cmd->handles[0]));
	/*
	 * NOTE(review): this NULL test only works if "base" is the first
	 * member of struct drm_i915_gem_object, so that to_intel_bo(NULL)
	 * round-trips to NULL — verify against the struct layout.
	 */
	if (&obj->base == NULL)
		return (-ENOENT);

	return (intel_framebuffer_create(dev, mode_cmd, obj, res));
}
8058235783Skib
/* Device-wide mode-setting hooks registered with the DRM core. */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.output_poll_changed = intel_fb_output_poll_changed,
};
8063235783Skib
8064235783Skibstatic struct drm_i915_gem_object *
8065235783Skibintel_alloc_context_page(struct drm_device *dev)
8066235783Skib{
8067235783Skib	struct drm_i915_gem_object *ctx;
8068235783Skib	int ret;
8069235783Skib
8070235783Skib	DRM_LOCK_ASSERT(dev);
8071235783Skib
8072235783Skib	ctx = i915_gem_alloc_object(dev, 4096);
8073235783Skib	if (!ctx) {
8074235783Skib		DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
8075235783Skib		return NULL;
8076235783Skib	}
8077235783Skib
8078235783Skib	ret = i915_gem_object_pin(ctx, 4096, true);
8079235783Skib	if (ret) {
8080235783Skib		DRM_ERROR("failed to pin power context: %d\n", ret);
8081235783Skib		goto err_unref;
8082235783Skib	}
8083235783Skib
8084235783Skib	ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
8085235783Skib	if (ret) {
8086235783Skib		DRM_ERROR("failed to set-domain on power context: %d\n", ret);
8087235783Skib		goto err_unpin;
8088235783Skib	}
8089235783Skib
8090235783Skib	return ctx;
8091235783Skib
8092235783Skiberr_unpin:
8093235783Skib	i915_gem_object_unpin(ctx);
8094235783Skiberr_unref:
8095235783Skib	drm_gem_object_unreference(&ctx->base);
8096235783Skib	DRM_UNLOCK(dev);
8097235783Skib	return NULL;
8098235783Skib}
8099235783Skib
/*
 * Request a new render frequency point (val) from the Ironlake DRPS
 * hardware via the MEMSWCTL handshake.  Returns false if the unit is
 * still busy with a previous command, true once the request is posted.
 */
bool ironlake_set_drps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 rgvswctl;

	rgvswctl = I915_READ16(MEMSWCTL);
	if (rgvswctl & MEMCTL_CMD_STS) {
		DRM_DEBUG("gpu busy, RCS change rejected\n");
		return false; /* still busy with another command */
	}

	/* Encode the change-frequency command with the target point. */
	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
		(val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
	I915_WRITE16(MEMSWCTL, rgvswctl);
	POSTING_READ16(MEMSWCTL);

	/* Setting the status bit hands the command to the hardware. */
	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE16(MEMSWCTL, rgvswctl);

	return true;
}
8121235783Skib
/*
 * Enable Ironlake dynamic render power-state switching (DRPS):
 * program evaluation intervals and thresholds, derive the min/max/
 * start frequency points from MEMMODECTL, switch the unit to software
 * mode and drop to the start frequency.  Also snapshots the power
 * counters used later by the IPS energy accounting.
 */
void ironlake_enable_drps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rgvmodectl = I915_READ(MEMMODECTL);
	u8 fmax, fmin, fstart, vstart;

	/* Enable temp reporting */
	I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
	I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);

	/* 100ms RC evaluation intervals */
	I915_WRITE(RCUPEI, 100000);
	I915_WRITE(RCDNEI, 100000);

	/* Set max/min thresholds to 90ms and 80ms respectively */
	I915_WRITE(RCBMAXAVG, 90000);
	I915_WRITE(RCBMINAVG, 80000);

	I915_WRITE(MEMIHYST, 1);

	/* Set up min, max, and cur for interrupt handling */
	fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
		MEMMODE_FSTART_SHIFT;

	/* Voltage for the start frequency, from the PXVFREQ table. */
	vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
		PXVFREQ_PX_SHIFT;

	dev_priv->fmax = fmax; /* IPS callback will increase this */
	dev_priv->fstart = fstart;

	dev_priv->max_delay = fstart;
	dev_priv->min_delay = fmin;
	dev_priv->cur_delay = fstart;

	DRM_DEBUG("fmax: %d, fmin: %d, fstart: %d\n",
			 fmax, fmin, fstart);

	I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);

	/*
	 * Interrupts will be enabled in ironlake_irq_postinstall
	 */

	I915_WRITE(VIDSTART, vstart);
	POSTING_READ(VIDSTART);

	/* Switch the memory frequency unit into software-request mode. */
	rgvmodectl |= MEMMODE_SWMODE_EN;
	I915_WRITE(MEMMODECTL, rgvmodectl);

	/* Wait (up to 10ms) for any in-flight frequency command to drain. */
	if (_intel_wait_for(dev,
	    (I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10,
	    1, "915per"))
		DRM_ERROR("stuck trying to change perf mode\n");
	pause("915dsp", 1);

	ironlake_set_drps(dev, fstart);

	/* Baseline the energy counters for the IPS accounting code. */
	dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
		I915_READ(0x112e0);
	dev_priv->last_time1 = jiffies_to_msecs(jiffies);
	dev_priv->last_count2 = I915_READ(0x112f4);
	nanotime(&dev_priv->last_time2);
}
8187235783Skib
/*
 * Disable Ironlake DRPS: mask and acknowledge its interrupts, return
 * the hardware to the saved start frequency, then park the frequency
 * unit.  The pause() calls give the hardware time to act on each step.
 */
void ironlake_disable_drps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 rgvswctl = I915_READ16(MEMSWCTL);

	/* Ack interrupts, disable EFC interrupt */
	I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
	I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
	I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
	I915_WRITE(DEIIR, DE_PCU_EVENT);
	I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);

	/* Go back to the starting frequency */
	ironlake_set_drps(dev, dev_priv->fstart);
	pause("915dsp", 1);
	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE(MEMSWCTL, rgvswctl);
	pause("915dsp", 1);

}
8208235783Skib
8209235783Skibvoid gen6_set_rps(struct drm_device *dev, u8 val)
8210235783Skib{
8211235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
8212235783Skib	u32 swreq;
8213235783Skib
8214235783Skib	swreq = (val & 0x3ff) << 25;
8215235783Skib	I915_WRITE(GEN6_RPNSWREQ, swreq);
8216235783Skib}
8217235783Skib
/*
 * Disable GEN6 RPS: cancel any pending frequency request, mask all PM
 * interrupts, clear the cached interrupt state under rps_lock, and
 * finally acknowledge anything still latched in PMIIR.
 */
void gen6_disable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
	I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
	I915_WRITE(GEN6_PMIER, 0);
	/* Complete PM interrupt masking here doesn't race with the rps work
	 * item again unmasking PM interrupts because that is using a different
	 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
	 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */

	mtx_lock(&dev_priv->rps_lock);
	dev_priv->pm_iir = 0;
	mtx_unlock(&dev_priv->rps_lock);

	/* Write back the latched bits to acknowledge them. */
	I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
}
8236235783Skib
/*
 * Decode a PXVFREQ register value into a frequency in kHz:
 * freq = divider * 133333 / (2^post * pre).  A zero pre-divider
 * marks an unused table entry and yields 0.
 */
static unsigned long intel_pxfreq(u32 vidfreq)
{
	int divider = (vidfreq >> 16) & 0x3f;
	int post = (vidfreq >> 12) & 0x3;
	int pre = vidfreq & 0x7;

	if (pre == 0)
		return 0;

	return (divider * 133333) / ((1 << post) * pre);
}
8251235783Skib
/*
 * Program the Ironlake energy monitor (EMON): load event energy
 * weights, derive per-P-state power weights from the PXVFREQ table,
 * apply the tuned "magic" constants, and enable the monitor.  The
 * fused correction factor is cached in dev_priv->corr for the IPS
 * power-estimation code.
 */
void intel_init_emon(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 lcfuse;
	u8 pxw[16];
	int i;

	/* Disable to program */
	I915_WRITE(ECR, 0);
	POSTING_READ(ECR);

	/* Program energy weights for various events */
	I915_WRITE(SDEW, 0x15040d00);
	I915_WRITE(CSIEW0, 0x007f0000);
	I915_WRITE(CSIEW1, 0x1e220004);
	I915_WRITE(CSIEW2, 0x04000004);

	for (i = 0; i < 5; i++)
		I915_WRITE(PEW + (i * 4), 0);
	for (i = 0; i < 3; i++)
		I915_WRITE(DEW + (i * 4), 0);

	/* Program P-state weights to account for frequency power adjustment */
	for (i = 0; i < 16; i++) {
		u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
		unsigned long freq = intel_pxfreq(pxvidfreq);
		unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
			PXVFREQ_PX_SHIFT;
		unsigned long val;

		/* Weight ~ V^2 * f, normalized to fit one byte. */
		val = vid * vid;
		val *= (freq / 1000);
		val *= 255;
		val /= (127*127*900);
		if (val > 0xff)
			DRM_ERROR("bad pxval: %ld\n", val);
		pxw[i] = val;
	}
	/* Render standby states get 0 weight */
	pxw[14] = 0;
	pxw[15] = 0;

	/* Pack the 16 byte-weights into four 32-bit registers. */
	for (i = 0; i < 4; i++) {
		u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
			(pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
		I915_WRITE(PXW + (i * 4), val);
	}

	/* Adjust magic regs to magic values (more experimental results) */
	I915_WRITE(OGW0, 0);
	I915_WRITE(OGW1, 0);
	I915_WRITE(EG0, 0x00007f00);
	I915_WRITE(EG1, 0x0000000e);
	I915_WRITE(EG2, 0x000e0000);
	I915_WRITE(EG3, 0x68000300);
	I915_WRITE(EG4, 0x42000000);
	I915_WRITE(EG5, 0x00140031);
	I915_WRITE(EG6, 0);
	I915_WRITE(EG7, 0);

	for (i = 0; i < 8; i++)
		I915_WRITE(PXWL + (i * 4), 0);

	/* Enable PMON + select events */
	I915_WRITE(ECR, 0x80000019);

	lcfuse = I915_READ(LCFUSE02);

	dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
}
8322235783Skib
8323235783Skibstatic int intel_enable_rc6(struct drm_device *dev)
8324235783Skib{
8325235783Skib	/*
8326235783Skib	 * Respect the kernel parameter if it is set
8327235783Skib	 */
8328235783Skib	if (i915_enable_rc6 >= 0)
8329235783Skib		return i915_enable_rc6;
8330235783Skib
8331235783Skib	/*
8332235783Skib	 * Disable RC6 on Ironlake
8333235783Skib	 */
8334235783Skib	if (INTEL_INFO(dev)->gen == 5)
8335235783Skib		return 0;
8336235783Skib
8337235783Skib	/*
8338235783Skib	 * Enable rc6 on Sandybridge if DMA remapping is disabled
8339235783Skib	 */
8340235783Skib	if (INTEL_INFO(dev)->gen == 6) {
8341235783Skib		DRM_DEBUG_DRIVER(
8342235783Skib		    "Sandybridge: intel_iommu_enabled %s -- RC6 %sabled\n",
8343235783Skib		     intel_iommu_enabled ? "true" : "false",
8344235783Skib		     !intel_iommu_enabled ? "en" : "dis");
8345235783Skib		return (intel_iommu_enabled ? 0 : INTEL_RC6_ENABLE);
8346235783Skib	}
8347235783Skib	DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
8348235783Skib	return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
8349235783Skib}
8350235783Skib
/*
 * Bring up GEN6 RPS (render P-state switching) and RC6 sleep states:
 * program wake-rate limits and thresholds, select the RC6 modes per
 * intel_enable_rc6(), load the minimum-frequency table through the
 * PCU mailbox, probe overclock support, record the min/max/current
 * frequency points, and unmask the PM interrupts.  The register
 * sequence follows the hardware bring-up recipe and must keep its
 * order.  Runs under the DRM lock with forcewake held.
 */
void gen6_enable_rps(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
	u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
	u32 pcu_mbox, rc6_mask = 0;
	u32 gtfifodbg;
	int cur_freq, min_freq, max_freq;
	int rc6_mode;
	int i;

	/* Here begins a magic sequence of register writes to enable
	 * auto-downclocking.
	 *
	 * Perhaps there might be some value in exposing these to
	 * userspace...
	 */
	I915_WRITE(GEN6_RC_STATE, 0);
	DRM_LOCK(dev);

	/* Clear the DBG now so we don't confuse earlier errors */
	if ((gtfifodbg = I915_READ(GTFIFODBG))) {
		DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
		I915_WRITE(GTFIFODBG, gtfifodbg);
	}

	gen6_gt_force_wake_get(dev_priv);

	/* disable the counters and set deterministic thresholds */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
	I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);

	/* Per-ring idle limits before RC transitions are considered. */
	for (i = 0; i < I915_NUM_RINGS; i++)
		I915_WRITE(RING_MAX_IDLE(dev_priv->rings[i].mmio_base), 10);

	I915_WRITE(GEN6_RC_SLEEP, 0);
	I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
	I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
	I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
	I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */

	/* Translate the policy decision into RC_CONTROL enable bits. */
	rc6_mode = intel_enable_rc6(dev_priv->dev);
	if (rc6_mode & INTEL_RC6_ENABLE)
		rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;

	if (rc6_mode & INTEL_RC6p_ENABLE)
		rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;

	if (rc6_mode & INTEL_RC6pp_ENABLE)
		rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;

	DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
			(rc6_mode & INTEL_RC6_ENABLE) ? "on" : "off",
			(rc6_mode & INTEL_RC6p_ENABLE) ? "on" : "off",
			(rc6_mode & INTEL_RC6pp_ENABLE) ? "on" : "off");

	I915_WRITE(GEN6_RC_CONTROL,
		   rc6_mask |
		   GEN6_RC_CTL_EI_MODE(1) |
		   GEN6_RC_CTL_HW_ENABLE);

	I915_WRITE(GEN6_RPNSWREQ,
		   GEN6_FREQUENCY(10) |
		   GEN6_OFFSET(0) |
		   GEN6_AGGRESSIVE_TURBO);
	I915_WRITE(GEN6_RC_VIDEO_FREQ,
		   GEN6_FREQUENCY(12));

	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
		   18 << 24 |
		   6 << 16);
	I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
	I915_WRITE(GEN6_RP_UP_EI, 100000);
	I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_MEDIA_HW_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_CONT);

	/* PCU mailbox handshake: wait idle, post command, wait done. */
	if (_intel_wait_for(dev,
	    (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500,
	    1, "915pr1"))
		DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");

	I915_WRITE(GEN6_PCODE_DATA, 0);
	I915_WRITE(GEN6_PCODE_MAILBOX,
		   GEN6_PCODE_READY |
		   GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
	if (_intel_wait_for(dev,
	    (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500,
	    1, "915pr2"))
		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");

	min_freq = (rp_state_cap & 0xff0000) >> 16;
	max_freq = rp_state_cap & 0xff;
	cur_freq = (gt_perf_status & 0xff00) >> 8;

	/* Check for overclock support */
	if (_intel_wait_for(dev,
	    (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500,
	    1, "915pr3"))
		DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
	pcu_mbox = I915_READ(GEN6_PCODE_DATA);
	if (_intel_wait_for(dev,
	    (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500,
	    1, "915pr4"))
		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
	if (pcu_mbox & (1<<31)) { /* OC supported */
		max_freq = pcu_mbox & 0xff;
		DRM_DEBUG("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
	}

	/* In units of 100MHz */
	dev_priv->max_delay = max_freq;
	dev_priv->min_delay = min_freq;
	dev_priv->cur_delay = cur_freq;

	/* requires MSI enabled */
	I915_WRITE(GEN6_PMIER,
		   GEN6_PM_MBOX_EVENT |
		   GEN6_PM_THERMAL_EVENT |
		   GEN6_PM_RP_DOWN_TIMEOUT |
		   GEN6_PM_RP_UP_THRESHOLD |
		   GEN6_PM_RP_DOWN_THRESHOLD |
		   GEN6_PM_RP_UP_EI_EXPIRED |
		   GEN6_PM_RP_DOWN_EI_EXPIRED);
	mtx_lock(&dev_priv->rps_lock);
	/* pm_iir should be 0 here; gen6_disable_rps cleared it. */
	if (dev_priv->pm_iir != 0)
		printf("pm_iir %x\n", dev_priv->pm_iir);
	I915_WRITE(GEN6_PMIMR, 0);
	mtx_unlock(&dev_priv->rps_lock);
	/* enable all PM interrupts */
	I915_WRITE(GEN6_PMINTRMSK, 0);

	gen6_gt_force_wake_put(dev_priv);
	DRM_UNLOCK(dev);
}
8500235783Skib
8501235783Skibvoid gen6_update_ring_freq(struct drm_i915_private *dev_priv)
8502235783Skib{
8503235783Skib	struct drm_device *dev;
8504235783Skib	int min_freq = 15;
8505235783Skib	int gpu_freq, ia_freq, max_ia_freq;
8506235783Skib	int scaling_factor = 180;
8507235783Skib	uint64_t tsc_freq;
8508235783Skib
8509235783Skib	dev = dev_priv->dev;
8510235783Skib#if 0
8511235783Skib	max_ia_freq = cpufreq_quick_get_max(0);
8512235783Skib	/*
8513235783Skib	 * Default to measured freq if none found, PCU will ensure we don't go
8514235783Skib	 * over
8515235783Skib	 */
8516235783Skib	if (!max_ia_freq)
8517235783Skib		max_ia_freq = tsc_freq;
8518235783Skib
8519235783Skib	/* Convert from Hz to MHz */
8520235783Skib	max_ia_freq /= 1000;
8521235783Skib#else
8522235783Skib	tsc_freq = atomic_load_acq_64(&tsc_freq);
8523235783Skib	max_ia_freq = tsc_freq / 1000 / 1000;
8524235783Skib#endif
8525235783Skib
8526235783Skib	DRM_LOCK(dev);
8527235783Skib
8528235783Skib	/*
8529235783Skib	 * For each potential GPU frequency, load a ring frequency we'd like
8530235783Skib	 * to use for memory access.  We do this by specifying the IA frequency
8531235783Skib	 * the PCU should use as a reference to determine the ring frequency.
8532235783Skib	 */
8533235783Skib	for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
8534235783Skib	     gpu_freq--) {
8535235783Skib		int diff = dev_priv->max_delay - gpu_freq;
8536235783Skib		int d;
8537235783Skib
8538235783Skib		/*
8539235783Skib		 * For GPU frequencies less than 750MHz, just use the lowest
8540235783Skib		 * ring freq.
8541235783Skib		 */
8542235783Skib		if (gpu_freq < min_freq)
8543235783Skib			ia_freq = 800;
8544235783Skib		else
8545235783Skib			ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
8546235783Skib		d = 100;
8547235783Skib		ia_freq = (ia_freq + d / 2) / d;
8548235783Skib
8549235783Skib		I915_WRITE(GEN6_PCODE_DATA,
8550235783Skib			   (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
8551235783Skib			   gpu_freq);
8552235783Skib		I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
8553235783Skib			   GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
8554235783Skib		if (_intel_wait_for(dev,
8555235783Skib		    (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
8556235783Skib		    10, 1, "915frq")) {
8557235783Skib			DRM_ERROR("pcode write of freq table timed out\n");
8558235783Skib			continue;
8559235783Skib		}
8560235783Skib	}
8561235783Skib
8562235783Skib	DRM_UNLOCK(dev);
8563235783Skib}
8564235783Skib
/*
 * Ironlake (gen5): configure display/render clock gating, zero the LP
 * watermarks, and apply the chicken-register settings required for
 * memory self-refresh and (on mobile parts) FBC.
 */
static void ironlake_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	/* Required for FBC */
	dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
		DPFCRUNIT_CLOCK_GATE_DISABLE |
		DPFDUNIT_CLOCK_GATE_DISABLE;
	/* Required for CxSR */
	dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_3DCGDIS0,
		   MARIUNIT_CLOCK_GATE_DISABLE |
		   SVSMUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(PCH_3DCGDIS1,
		   VFMUNIT_CLOCK_GATE_DISABLE);

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	/*
	 * According to the spec the following bits should be set in
	 * order to enable memory self-refresh
	 * The bit 22/21 of 0x42004
	 * The bit 5 of 0x42020
	 * The bit 15 of 0x45000
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   (I915_READ(ILK_DISPLAY_CHICKEN2) |
		    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
	I915_WRITE(ILK_DSPCLK_GATE,
		   (I915_READ(ILK_DSPCLK_GATE) |
		    ILK_DPARB_CLK_GATE));
	I915_WRITE(DISP_ARB_CTL,
		   (I915_READ(DISP_ARB_CTL) |
		    DISP_FBC_WM_DIS));
	/* LP watermarks start disabled; they are programmed elsewhere. */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	/*
	 * Based on the document from hardware guys the following bits
	 * should be set unconditionally in order to enable FBC.
	 * The bit 22 of 0x42000
	 * The bit 22 of 0x42004
	 * The bit 7,8,9 of 0x42020.
	 */
	if (IS_IRONLAKE_M(dev)) {
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
		I915_WRITE(ILK_DISPLAY_CHICKEN2,
			   I915_READ(ILK_DISPLAY_CHICKEN2) |
			   ILK_DPARB_GATE);
		I915_WRITE(ILK_DSPCLK_GATE,
			   I915_READ(ILK_DSPCLK_GATE) |
			   ILK_DPFC_DIS1 |
			   ILK_DPFC_DIS2 |
			   ILK_CLK_FBC);
	}

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);
	/* Masked write: the upper 16 bits enable writes to the lower 16. */
	I915_WRITE(_3D_CHICKEN2,
		   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
		   _3D_CHICKEN2_WM_READ_PIPELINED);
}
8633235783Skib
/*
 * Sandy Bridge (gen6): clock gating and chicken-register setup needed
 * for memory self-refresh, FBC, and render stability workarounds.
 */
static void gen6_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);

	/* LP watermarks start disabled; they are programmed elsewhere. */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	I915_WRITE(GEN6_UCGCTL1,
		   I915_READ(GEN6_UCGCTL1) |
		   GEN6_BLBUNIT_CLOCK_GATE_DISABLE);

	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
	 * gating disable must be set.  Failure to set it results in
	 * flickering pixels due to Z write ordering failures after
	 * some amount of runtime in the Mesa "fire" demo, and Unigine
	 * Sanctuary and Tropics, and apparently anything else with
	 * alpha test or pixel discard.
	 *
	 * According to the spec, bit 11 (RCCUNIT) must also be set,
	 * but we didn't debug actual testcases to find it out.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);

	/*
	 * According to the spec the following bits should be
	 * set in order to enable memory self-refresh and fbc:
	 * The bit21 and bit22 of 0x42000
	 * The bit21 and bit22 of 0x42004
	 * The bit5 and bit7 of 0x42020
	 * The bit14 of 0x70180
	 * The bit14 of 0x71180
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN1,
		   I915_READ(ILK_DISPLAY_CHICKEN1) |
		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
	I915_WRITE(ILK_DSPCLK_GATE,
		   I915_READ(ILK_DSPCLK_GATE) |
		   ILK_DPARB_CLK_GATE  |
		   ILK_DPFD_CLK_GATE);

	/* Disable trickle feed on every pipe and flush each plane. */
	for_each_pipe(pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_display_plane(dev_priv, pipe);
	}
}
8695235783Skib
/*
 * Ivy Bridge (gen7): clock gating plus the documented hardware
 * workarounds (RCZ unit gating, RHWO optimization, L3 control/chicken
 * mode, and the MBC error-rejection fix).
 */
static void ivybridge_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	/* LP watermarks start disabled; they are programmed elsewhere. */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	/* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);

	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

	/* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
	I915_WRITE(GEN7_L3CNTLREG1,
			GEN7_WA_FOR_GEN7_L3_CONTROL);
	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
			GEN7_WA_L3_CHICKEN_MODE);

	/* This is required by WaCatErrorRejectionIssue */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
			I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
			GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	/* Disable trickle feed on every pipe and flush each plane. */
	for_each_pipe(pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_display_plane(dev_priv, pipe);
	}
}
8741235783Skib
8742235783Skibstatic void g4x_init_clock_gating(struct drm_device *dev)
8743235783Skib{
8744235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
8745235783Skib	uint32_t dspclk_gate;
8746235783Skib
8747235783Skib	I915_WRITE(RENCLK_GATE_D1, 0);
8748235783Skib	I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
8749235783Skib		   GS_UNIT_CLOCK_GATE_DISABLE |
8750235783Skib		   CL_UNIT_CLOCK_GATE_DISABLE);
8751235783Skib	I915_WRITE(RAMCLK_GATE_D, 0);
8752235783Skib	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
8753235783Skib		OVRUNIT_CLOCK_GATE_DISABLE |
8754235783Skib		OVCUNIT_CLOCK_GATE_DISABLE;
8755235783Skib	if (IS_GM45(dev))
8756235783Skib		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
8757235783Skib	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
8758235783Skib}
8759235783Skib
/*
 * Crestline (965GM): disable RCC render clock gating and clear the
 * remaining render/display/RAM clock-gating controls.
 */
static void crestline_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(DSPCLK_GATE_D, 0);
	I915_WRITE(RAMCLK_GATE_D, 0);
	/* DEUC is written as a 16-bit register. */
	I915_WRITE16(DEUC, 0);
}
8770235783Skib
/*
 * Broadwater (965G): disable clock gating for several render units
 * (RCZ, RCC, RCPB, ISC, FBC) and clear the second gating register.
 */
static void broadwater_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
		   I965_RCC_CLOCK_GATE_DISABLE |
		   I965_RCPB_CLOCK_GATE_DISABLE |
		   I965_ISC_CLOCK_GATE_DISABLE |
		   I965_FBC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
}
8782235783Skib
8783235783Skibstatic void gen3_init_clock_gating(struct drm_device *dev)
8784235783Skib{
8785235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
8786235783Skib	u32 dstate = I915_READ(D_STATE);
8787235783Skib
8788235783Skib	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
8789235783Skib		DSTATE_DOT_CLOCK_GATING;
8790235783Skib	I915_WRITE(D_STATE, dstate);
8791235783Skib}
8792235783Skib
/* 85x family: disable SV unit clock gating. */
static void i85x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
}
8799235783Skib
/* i830: disable overlay unit clock gating. */
static void i830_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
}
8806235783Skib
/* Ibex Peak (PCH) clock gating setup. */
static void ibx_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}
8818235783Skib
/* Cougar Point (PCH) clock gating setup. */
static void cpt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
		   DPLS_EDP_PPS_FIX_DIS);
	/* Without this, mode sets may fail silently on FDI */
	for_each_pipe(pipe)
		I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
}
8836235783Skib
/*
 * Free the RC6 context pages: unpin and drop the GEM reference on the
 * render context and the power context, if either was allocated.
 * Safe to call repeatedly (pointers are NULLed out).
 */
static void ironlake_teardown_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->renderctx) {
		i915_gem_object_unpin(dev_priv->renderctx);
		drm_gem_object_unreference(&dev_priv->renderctx->base);
		dev_priv->renderctx = NULL;
	}

	if (dev_priv->pwrctx) {
		i915_gem_object_unpin(dev_priv->pwrctx);
		drm_gem_object_unreference(&dev_priv->pwrctx->base);
		dev_priv->pwrctx = NULL;
	}
}
8853235783Skib
/*
 * Disable RC6: if a power context is armed (PWRCTXA non-zero), wake
 * the GPU out of render standby, disarm the power context, restore
 * RSTDBYCTL, then free the RC6 context pages.
 */
static void ironlake_disable_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (I915_READ(PWRCTXA)) {
		/* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
		/* Bounded wait for the render standby status to report on. */
		(void)_intel_wait_for(dev,
		    ((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
		    50, 1, "915pro");

		I915_WRITE(PWRCTXA, 0);
		POSTING_READ(PWRCTXA);

		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
		POSTING_READ(RSTDBYCTL);
	}

	ironlake_teardown_rc6(dev);
}
8874235783Skib
8875235783Skibstatic int ironlake_setup_rc6(struct drm_device *dev)
8876235783Skib{
8877235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
8878235783Skib
8879235783Skib	if (dev_priv->renderctx == NULL)
8880235783Skib		dev_priv->renderctx = intel_alloc_context_page(dev);
8881235783Skib	if (!dev_priv->renderctx)
8882235783Skib		return -ENOMEM;
8883235783Skib
8884235783Skib	if (dev_priv->pwrctx == NULL)
8885235783Skib		dev_priv->pwrctx = intel_alloc_context_page(dev);
8886235783Skib	if (!dev_priv->pwrctx) {
8887235783Skib		ironlake_teardown_rc6(dev);
8888235783Skib		return -ENOMEM;
8889235783Skib	}
8890235783Skib
8891235783Skib	return 0;
8892235783Skib}
8893235783Skib
8894235783Skibvoid ironlake_enable_rc6(struct drm_device *dev)
8895235783Skib{
8896235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
8897235783Skib	int ret;
8898235783Skib
8899235783Skib	/* rc6 disabled by default due to repeated reports of hanging during
8900235783Skib	 * boot and resume.
8901235783Skib	 */
8902235783Skib	if (!intel_enable_rc6(dev))
8903235783Skib		return;
8904235783Skib
8905235783Skib	DRM_LOCK(dev);
8906235783Skib	ret = ironlake_setup_rc6(dev);
8907235783Skib	if (ret) {
8908235783Skib		DRM_UNLOCK(dev);
8909235783Skib		return;
8910235783Skib	}
8911235783Skib
8912235783Skib	/*
8913235783Skib	 * GPU can automatically power down the render unit if given a page
8914235783Skib	 * to save state.
8915235783Skib	 */
8916235783Skib	ret = BEGIN_LP_RING(6);
8917235783Skib	if (ret) {
8918235783Skib		ironlake_teardown_rc6(dev);
8919235783Skib		DRM_UNLOCK(dev);
8920235783Skib		return;
8921235783Skib	}
8922235783Skib
8923235783Skib	OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
8924235783Skib	OUT_RING(MI_SET_CONTEXT);
8925235783Skib	OUT_RING(dev_priv->renderctx->gtt_offset |
8926235783Skib		 MI_MM_SPACE_GTT |
8927235783Skib		 MI_SAVE_EXT_STATE_EN |
8928235783Skib		 MI_RESTORE_EXT_STATE_EN |
8929235783Skib		 MI_RESTORE_INHIBIT);
8930235783Skib	OUT_RING(MI_SUSPEND_FLUSH);
8931235783Skib	OUT_RING(MI_NOOP);
8932235783Skib	OUT_RING(MI_FLUSH);
8933235783Skib	ADVANCE_LP_RING();
8934235783Skib
8935235783Skib	/*
8936235783Skib	 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
8937235783Skib	 * does an implicit flush, combined with MI_FLUSH above, it should be
8938235783Skib	 * safe to assume that renderctx is valid
8939235783Skib	 */
8940235783Skib	ret = intel_wait_ring_idle(LP_RING(dev_priv));
8941235783Skib	if (ret) {
8942235783Skib		DRM_ERROR("failed to enable ironlake power power savings\n");
8943235783Skib		ironlake_teardown_rc6(dev);
8944235783Skib		DRM_UNLOCK(dev);
8945235783Skib		return;
8946235783Skib	}
8947235783Skib
8948235783Skib	I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
8949235783Skib	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
8950235783Skib	DRM_UNLOCK(dev);
8951235783Skib}
8952235783Skib
8953235783Skibvoid intel_init_clock_gating(struct drm_device *dev)
8954235783Skib{
8955235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
8956235783Skib
8957235783Skib	dev_priv->display.init_clock_gating(dev);
8958235783Skib
8959235783Skib	if (dev_priv->display.init_pch_clock_gating)
8960235783Skib		dev_priv->display.init_pch_clock_gating(dev);
8961235783Skib}
8962235783Skib
/* Set up chip specific display functions */
/*
 * Populate dev_priv->display with generation-specific function
 * pointers: DPMS/mode-set/plane update, FBC hooks, display clock
 * query, forcewake, clock gating, watermark update, FDI link
 * training, ELD (audio) write, and page-flip queueing.
 */
static void intel_init_display(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* We always want a DPMS function */
	if (HAS_PCH_SPLIT(dev)) {
		dev_priv->display.dpms = ironlake_crtc_dpms;
		dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
		dev_priv->display.update_plane = ironlake_update_plane;
	} else {
		dev_priv->display.dpms = i9xx_crtc_dpms;
		dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
		dev_priv->display.update_plane = i9xx_update_plane;
	}

	/* FBC hooks, only on hardware that supports it. */
	if (I915_HAS_FBC(dev)) {
		if (HAS_PCH_SPLIT(dev)) {
			dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
			dev_priv->display.enable_fbc = ironlake_enable_fbc;
			dev_priv->display.disable_fbc = ironlake_disable_fbc;
		} else if (IS_GM45(dev)) {
			dev_priv->display.fbc_enabled = g4x_fbc_enabled;
			dev_priv->display.enable_fbc = g4x_enable_fbc;
			dev_priv->display.disable_fbc = g4x_disable_fbc;
		} else if (IS_CRESTLINE(dev)) {
			dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
			dev_priv->display.enable_fbc = i8xx_enable_fbc;
			dev_priv->display.disable_fbc = i8xx_disable_fbc;
		}
		/* 855GM needs testing */
	}

	/* Returns the core display clock speed */
	if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
		dev_priv->display.get_display_clock_speed =
			i945_get_display_clock_speed;
	else if (IS_I915G(dev))
		dev_priv->display.get_display_clock_speed =
			i915_get_display_clock_speed;
	else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev))
		dev_priv->display.get_display_clock_speed =
			i9xx_misc_get_display_clock_speed;
	else if (IS_I915GM(dev))
		dev_priv->display.get_display_clock_speed =
			i915gm_get_display_clock_speed;
	else if (IS_I865G(dev))
		dev_priv->display.get_display_clock_speed =
			i865_get_display_clock_speed;
	else if (IS_I85X(dev))
		dev_priv->display.get_display_clock_speed =
			i855_get_display_clock_speed;
	else /* 852, 830 */
		dev_priv->display.get_display_clock_speed =
			i830_get_display_clock_speed;

	/* For FIFO watermark updates */
	if (HAS_PCH_SPLIT(dev)) {
		dev_priv->display.force_wake_get = __gen6_gt_force_wake_get;
		dev_priv->display.force_wake_put = __gen6_gt_force_wake_put;

		/* IVB configs may use multi-threaded forcewake */
		if (IS_IVYBRIDGE(dev)) {
			u32	ecobus;

			/* A small trick here - if the bios hasn't configured MT forcewake,
			 * and if the device is in RC6, then force_wake_mt_get will not wake
			 * the device and the ECOBUS read will return zero. Which will be
			 * (correctly) interpreted by the test below as MT forcewake being
			 * disabled.
			 */
			DRM_LOCK(dev);
			__gen6_gt_force_wake_mt_get(dev_priv);
			ecobus = I915_READ_NOTRACE(ECOBUS);
			__gen6_gt_force_wake_mt_put(dev_priv);
			DRM_UNLOCK(dev);

			if (ecobus & FORCEWAKE_MT_ENABLE) {
				DRM_DEBUG_KMS("Using MT version of forcewake\n");
				dev_priv->display.force_wake_get =
					__gen6_gt_force_wake_mt_get;
				dev_priv->display.force_wake_put =
					__gen6_gt_force_wake_mt_put;
			}
		}

		/* PCH (south display) clock gating hook. */
		if (HAS_PCH_IBX(dev))
			dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
		else if (HAS_PCH_CPT(dev))
			dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating;

		/* Per-generation watermark/FDI/clock-gating/ELD hooks. */
		if (IS_GEN5(dev)) {
			if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
				dev_priv->display.update_wm = ironlake_update_wm;
			else {
				DRM_DEBUG_KMS("Failed to get proper latency. "
					      "Disable CxSR\n");
				dev_priv->display.update_wm = NULL;
			}
			dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
			dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
			dev_priv->display.write_eld = ironlake_write_eld;
		} else if (IS_GEN6(dev)) {
			if (SNB_READ_WM0_LATENCY()) {
				dev_priv->display.update_wm = sandybridge_update_wm;
				dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
			} else {
				DRM_DEBUG_KMS("Failed to read display plane latency. "
					      "Disable CxSR\n");
				dev_priv->display.update_wm = NULL;
			}
			dev_priv->display.fdi_link_train = gen6_fdi_link_train;
			dev_priv->display.init_clock_gating = gen6_init_clock_gating;
			dev_priv->display.write_eld = ironlake_write_eld;
		} else if (IS_IVYBRIDGE(dev)) {
			/* FIXME: detect B0+ stepping and use auto training */
			dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
			if (SNB_READ_WM0_LATENCY()) {
				dev_priv->display.update_wm = sandybridge_update_wm;
				dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
			} else {
				DRM_DEBUG_KMS("Failed to read display plane latency. "
					      "Disable CxSR\n");
				dev_priv->display.update_wm = NULL;
			}
			dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
			dev_priv->display.write_eld = ironlake_write_eld;
		} else
			dev_priv->display.update_wm = NULL;
	} else if (IS_PINEVIEW(dev)) {
		/* Pineview CxSR needs a known memory latency; disable it
		 * (and watermark updates) when no table entry matches. */
		if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
					    dev_priv->is_ddr3,
					    dev_priv->fsb_freq,
					    dev_priv->mem_freq)) {
			DRM_INFO("failed to find known CxSR latency "
				 "(found ddr%s fsb freq %d, mem freq %d), "
				 "disabling CxSR\n",
				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
				 dev_priv->fsb_freq, dev_priv->mem_freq);
			/* Disable CxSR and never update its watermark again */
			pineview_disable_cxsr(dev);
			dev_priv->display.update_wm = NULL;
		} else
			dev_priv->display.update_wm = pineview_update_wm;
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	} else if (IS_G4X(dev)) {
		dev_priv->display.write_eld = g4x_write_eld;
		dev_priv->display.update_wm = g4x_update_wm;
		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
	} else if (IS_GEN4(dev)) {
		dev_priv->display.update_wm = i965_update_wm;
		if (IS_CRESTLINE(dev))
			dev_priv->display.init_clock_gating = crestline_init_clock_gating;
		else if (IS_BROADWATER(dev))
			dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
	} else if (IS_GEN3(dev)) {
		dev_priv->display.update_wm = i9xx_update_wm;
		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	} else if (IS_I865G(dev)) {
		dev_priv->display.update_wm = i830_update_wm;
		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
		dev_priv->display.get_fifo_size = i830_get_fifo_size;
	} else if (IS_I85X(dev)) {
		dev_priv->display.update_wm = i9xx_update_wm;
		dev_priv->display.get_fifo_size = i85x_get_fifo_size;
		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
	} else {
		dev_priv->display.update_wm = i830_update_wm;
		dev_priv->display.init_clock_gating = i830_init_clock_gating;
		if (IS_845G(dev))
			dev_priv->display.get_fifo_size = i845_get_fifo_size;
		else
			dev_priv->display.get_fifo_size = i830_get_fifo_size;
	}

	/* Default just returns -ENODEV to indicate unsupported */
	dev_priv->display.queue_flip = intel_default_queue_flip;

	switch (INTEL_INFO(dev)->gen) {
	case 2:
		dev_priv->display.queue_flip = intel_gen2_queue_flip;
		break;

	case 3:
		dev_priv->display.queue_flip = intel_gen3_queue_flip;
		break;

	case 4:
	case 5:
		dev_priv->display.queue_flip = intel_gen4_queue_flip;
		break;

	case 6:
		dev_priv->display.queue_flip = intel_gen6_queue_flip;
		break;
	case 7:
		dev_priv->display.queue_flip = intel_gen7_queue_flip;
		break;
	}
}
9164235783Skib
/*
 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
 * resume, or other times.  This quirk makes sure that's the case for
 * affected systems.
 */
static void quirk_pipea_force(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Record the quirk bit; consumers test dev_priv->quirks. */
	dev_priv->quirks |= QUIRK_PIPEA_FORCE;
	DRM_DEBUG("applying pipe a force quirk\n");
}
9177235783Skib
/*
 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
 */
static void quirk_ssc_force_disable(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	/* Force spread-spectrum clocking off for LVDS on these machines. */
	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
}
9186235783Skib
/*
 * Table-driven PCI quirk: the hook runs when the PCI device ID matches
 * and the subsystem IDs match (or are the PCI_ANY_ID wildcard).
 */
struct intel_quirk {
	int device;		/* PCI device ID */
	int subsystem_vendor;	/* PCI subsystem vendor ID, or PCI_ANY_ID */
	int subsystem_device;	/* PCI subsystem device ID, or PCI_ANY_ID */
	void (*hook)(struct drm_device *dev);	/* applies the quirk */
};

/* Wildcard value for the subsystem ID fields above. */
#define	PCI_ANY_ID	(~0u)
9195235783Skib
/* Known devices needing quirks; matched by intel_init_quirks(). */
struct intel_quirk intel_quirks[] = {
	/* HP Mini needs pipe A force quirk (LP: #322104) */
	{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },

	/* Thinkpad R31 needs pipe A force quirk */
	{ 0x3577, 0x1014, 0x0505, quirk_pipea_force },
	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },

	/* ThinkPad X30 needs pipe A force quirk (LP: #304614) */
	{ 0x3577,  0x1014, 0x0513, quirk_pipea_force },
	/* ThinkPad X40 needs pipe A force quirk */

	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },

	/* 855 & before need to leave pipe A & dpll A up */
	{ 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
	{ 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },

	/* Lenovo U160 cannot use SSC on LVDS */
	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },

	/* Sony Vaio Y cannot use SSC on LVDS */
	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
};
9222235783Skib
9223235783Skibstatic void intel_init_quirks(struct drm_device *dev)
9224235783Skib{
9225235783Skib	struct intel_quirk *q;
9226235783Skib	device_t d;
9227235783Skib	int i;
9228235783Skib
9229235783Skib	d = dev->device;
9230235783Skib	for (i = 0; i < DRM_ARRAY_SIZE(intel_quirks); i++) {
9231235783Skib		q = &intel_quirks[i];
9232235783Skib		if (pci_get_device(d) == q->device &&
9233235783Skib		    (pci_get_subvendor(d) == q->subsystem_vendor ||
9234235783Skib		     q->subsystem_vendor == PCI_ANY_ID) &&
9235235783Skib		    (pci_get_subdevice(d) == q->subsystem_device ||
9236235783Skib		     q->subsystem_device == PCI_ANY_ID))
9237235783Skib			q->hook(dev);
9238235783Skib	}
9239235783Skib}
9240235783Skib
/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u8 sr1;
	u32 vga_reg;

	/* The VGA control register moved to the CPU on PCH-split parts. */
	if (HAS_PCH_SPLIT(dev))
		vga_reg = CPU_VGACNTRL;
	else
		vga_reg = VGACNTRL;

#if 0
	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
#endif
	/* Select VGA sequencer register SR01 and set bit 5 (screen off). */
	outb(VGA_SR_INDEX, 1);
	sr1 = inb(VGA_SR_DATA);
	outb(VGA_SR_DATA, sr1 | 1 << 5);
#if 0
	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
#endif
	/* Let the sequencer write settle before disabling the plane. */
	DELAY(300);

	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
	POSTING_READ(vga_reg);
}
9267235783Skib
/*
 * One-time KMS initialisation: set up the DRM mode-config state,
 * create one CRTC and one plane per display pipe, disable the unused
 * VGA plane, register the outputs, and enable clock gating plus
 * generation-dependent power/performance features.
 */
void intel_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i, ret;

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	dev->mode_config.preferred_depth = 24;
	dev->mode_config.prefer_shadow = 1;

	dev->mode_config.funcs = __DECONST(struct drm_mode_config_funcs *,
	    &intel_mode_funcs);

	/* Apply device-specific quirks before any display probing. */
	intel_init_quirks(dev);

	intel_init_display(dev);

	/* Maximum framebuffer dimensions grow with hardware generation. */
	if (IS_GEN2(dev)) {
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
	} else if (IS_GEN3(dev)) {
		dev->mode_config.max_width = 4096;
		dev->mode_config.max_height = 4096;
	} else {
		dev->mode_config.max_width = 8192;
		dev->mode_config.max_height = 8192;
	}
	dev->mode_config.fb_base = dev->agp->base;

	DRM_DEBUG_KMS("%d display pipe%s available.\n",
		      dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : "");

	/* A CRTC and an overlay plane for each pipe; plane failure is
	 * logged but not fatal. */
	for (i = 0; i < dev_priv->num_pipe; i++) {
		intel_crtc_init(dev, i);
		ret = intel_plane_init(dev, i);
		if (ret)
			DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret);
	}

	/* Just disable it once at startup */
	i915_disable_vga(dev);
	intel_setup_outputs(dev);

	intel_init_clock_gating(dev);

	/* Ironlake mobile: dynamic render power state + energy monitor. */
	if (IS_IRONLAKE_M(dev)) {
		ironlake_enable_drps(dev);
		intel_init_emon(dev);
	}

	/* Gen6: RPS (render P-state) scaling and ring frequency table. */
	if (IS_GEN6(dev)) {
		gen6_enable_rps(dev_priv);
		gen6_update_ring_freq(dev_priv);
	}

	/* Idle detection used for GPU downclocking; torn down in
	 * intel_modeset_cleanup(). */
	TASK_INIT(&dev_priv->idle_task, 0, intel_idle_update, dev_priv);
	callout_init(&dev_priv->idle_callout, CALLOUT_MPSAFE);
}
9329235783Skib
/*
 * GEM-dependent modeset initialisation: enable RC6 power saving on
 * Ironlake mobile parts and set up the video overlay.
 */
void intel_modeset_gem_init(struct drm_device *dev)
{
	if (IS_IRONLAKE_M(dev)) {
		ironlake_enable_rc6(dev);
	}

	intel_setup_overlay(dev);
}
9337235783Skib
9338235783Skibvoid intel_modeset_cleanup(struct drm_device *dev)
9339235783Skib{
9340235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
9341235783Skib	struct drm_crtc *crtc;
9342235783Skib	struct intel_crtc *intel_crtc;
9343235783Skib
9344235783Skib	drm_kms_helper_poll_fini(dev);
9345235783Skib	DRM_LOCK(dev);
9346235783Skib
9347235783Skib#if 0
9348235783Skib	intel_unregister_dsm_handler();
9349235783Skib#endif
9350235783Skib
9351235783Skib	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9352235783Skib		/* Skip inactive CRTCs */
9353235783Skib		if (!crtc->fb)
9354235783Skib			continue;
9355235783Skib
9356235783Skib		intel_crtc = to_intel_crtc(crtc);
9357235783Skib		intel_increase_pllclock(crtc);
9358235783Skib	}
9359235783Skib
9360235783Skib	intel_disable_fbc(dev);
9361235783Skib
9362235783Skib	if (IS_IRONLAKE_M(dev))
9363235783Skib		ironlake_disable_drps(dev);
9364235783Skib	if (IS_GEN6(dev))
9365235783Skib		gen6_disable_rps(dev);
9366235783Skib
9367235783Skib	if (IS_IRONLAKE_M(dev))
9368235783Skib		ironlake_disable_rc6(dev);
9369235783Skib
9370235783Skib	/* Disable the irq before mode object teardown, for the irq might
9371235783Skib	 * enqueue unpin/hotplug work. */
9372235783Skib	drm_irq_uninstall(dev);
9373235783Skib	DRM_UNLOCK(dev);
9374235783Skib
9375235783Skib	if (taskqueue_cancel(dev_priv->tq, &dev_priv->hotplug_task, NULL))
9376235783Skib		taskqueue_drain(dev_priv->tq, &dev_priv->hotplug_task);
9377235783Skib	if (taskqueue_cancel(dev_priv->tq, &dev_priv->rps_task, NULL))
9378235783Skib		taskqueue_drain(dev_priv->tq, &dev_priv->rps_task);
9379235783Skib
9380235783Skib	/* Shut off idle work before the crtcs get freed. */
9381235783Skib	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9382235783Skib		intel_crtc = to_intel_crtc(crtc);
9383235783Skib		callout_drain(&intel_crtc->idle_callout);
9384235783Skib	}
9385235783Skib	callout_drain(&dev_priv->idle_callout);
9386235783Skib	if (taskqueue_cancel(dev_priv->tq, &dev_priv->idle_task, NULL))
9387235783Skib		taskqueue_drain(dev_priv->tq, &dev_priv->idle_task);
9388235783Skib
9389235783Skib	drm_mode_config_cleanup(dev);
9390235783Skib}
9391235783Skib
9392235783Skib/*
9393235783Skib * Return which encoder is currently attached for connector.
9394235783Skib */
9395235783Skibstruct drm_encoder *intel_best_encoder(struct drm_connector *connector)
9396235783Skib{
9397235783Skib	return &intel_attached_encoder(connector)->base;
9398235783Skib}
9399235783Skib
9400235783Skibvoid intel_connector_attach_encoder(struct intel_connector *connector,
9401235783Skib				    struct intel_encoder *encoder)
9402235783Skib{
9403235783Skib	connector->encoder = encoder;
9404235783Skib	drm_mode_connector_attach_encoder(&connector->base,
9405235783Skib					  &encoder->base);
9406235783Skib}
9407235783Skib
9408235783Skib/*
9409235783Skib * set vga decode state - true == enable VGA decode
9410235783Skib */
9411235783Skibint intel_modeset_vga_set_state(struct drm_device *dev, bool state)
9412235783Skib{
9413235783Skib	struct drm_i915_private *dev_priv;
9414235783Skib	device_t bridge_dev;
9415235783Skib	u16 gmch_ctrl;
9416235783Skib
9417235783Skib	dev_priv = dev->dev_private;
9418235783Skib	bridge_dev = intel_gtt_get_bridge_device();
9419235783Skib	gmch_ctrl = pci_read_config(bridge_dev, INTEL_GMCH_CTRL, 2);
9420235783Skib	if (state)
9421235783Skib		gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
9422235783Skib	else
9423235783Skib		gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
9424235783Skib	pci_write_config(bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl, 2);
9425235783Skib	return (0);
9426235783Skib}
9427235783Skib
/*
 * Snapshot of per-pipe display hardware state, captured by
 * intel_display_capture_error_state() and formatted by
 * intel_display_print_error_state() for GPU error reports.
 * Each array is indexed by pipe (0 or 1).
 */
struct intel_display_error_state {
	/* Cursor registers (CURCNTR/CURPOS/CURBASE). */
	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		u32 size;	/* never written by the capture code */
	} cursor[2];

	/* Pipe configuration and timing registers. */
	struct intel_pipe_error_state {
		u32 conf;
		u32 source;

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} pipe[2];

	/* Primary plane registers; surface/tile_offset are only
	 * captured on gen4+ hardware. */
	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;
		u32 pos;
		u32 addr;
		u32 surface;
		u32 tile_offset;
	} plane[2];
};
9458235783Skib
9459235783Skibstruct intel_display_error_state *
9460235783Skibintel_display_capture_error_state(struct drm_device *dev)
9461235783Skib{
9462235783Skib	drm_i915_private_t *dev_priv = dev->dev_private;
9463235783Skib	struct intel_display_error_state *error;
9464235783Skib	int i;
9465235783Skib
9466235783Skib	error = malloc(sizeof(*error), DRM_MEM_KMS, M_NOWAIT);
9467235783Skib	if (error == NULL)
9468235783Skib		return NULL;
9469235783Skib
9470235783Skib	for (i = 0; i < 2; i++) {
9471235783Skib		error->cursor[i].control = I915_READ(CURCNTR(i));
9472235783Skib		error->cursor[i].position = I915_READ(CURPOS(i));
9473235783Skib		error->cursor[i].base = I915_READ(CURBASE(i));
9474235783Skib
9475235783Skib		error->plane[i].control = I915_READ(DSPCNTR(i));
9476235783Skib		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
9477235783Skib		error->plane[i].size = I915_READ(DSPSIZE(i));
9478235783Skib		error->plane[i].pos = I915_READ(DSPPOS(i));
9479235783Skib		error->plane[i].addr = I915_READ(DSPADDR(i));
9480235783Skib		if (INTEL_INFO(dev)->gen >= 4) {
9481235783Skib			error->plane[i].surface = I915_READ(DSPSURF(i));
9482235783Skib			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
9483235783Skib		}
9484235783Skib
9485235783Skib		error->pipe[i].conf = I915_READ(PIPECONF(i));
9486235783Skib		error->pipe[i].source = I915_READ(PIPESRC(i));
9487235783Skib		error->pipe[i].htotal = I915_READ(HTOTAL(i));
9488235783Skib		error->pipe[i].hblank = I915_READ(HBLANK(i));
9489235783Skib		error->pipe[i].hsync = I915_READ(HSYNC(i));
9490235783Skib		error->pipe[i].vtotal = I915_READ(VTOTAL(i));
9491235783Skib		error->pipe[i].vblank = I915_READ(VBLANK(i));
9492235783Skib		error->pipe[i].vsync = I915_READ(VSYNC(i));
9493235783Skib	}
9494235783Skib
9495235783Skib	return error;
9496235783Skib}
9497235783Skib
9498235783Skibvoid
9499235783Skibintel_display_print_error_state(struct sbuf *m,
9500235783Skib				struct drm_device *dev,
9501235783Skib				struct intel_display_error_state *error)
9502235783Skib{
9503235783Skib	int i;
9504235783Skib
9505235783Skib	for (i = 0; i < 2; i++) {
9506235783Skib		sbuf_printf(m, "Pipe [%d]:\n", i);
9507235783Skib		sbuf_printf(m, "  CONF: %08x\n", error->pipe[i].conf);
9508235783Skib		sbuf_printf(m, "  SRC: %08x\n", error->pipe[i].source);
9509235783Skib		sbuf_printf(m, "  HTOTAL: %08x\n", error->pipe[i].htotal);
9510235783Skib		sbuf_printf(m, "  HBLANK: %08x\n", error->pipe[i].hblank);
9511235783Skib		sbuf_printf(m, "  HSYNC: %08x\n", error->pipe[i].hsync);
9512235783Skib		sbuf_printf(m, "  VTOTAL: %08x\n", error->pipe[i].vtotal);
9513235783Skib		sbuf_printf(m, "  VBLANK: %08x\n", error->pipe[i].vblank);
9514235783Skib		sbuf_printf(m, "  VSYNC: %08x\n", error->pipe[i].vsync);
9515235783Skib
9516235783Skib		sbuf_printf(m, "Plane [%d]:\n", i);
9517235783Skib		sbuf_printf(m, "  CNTR: %08x\n", error->plane[i].control);
9518235783Skib		sbuf_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
9519235783Skib		sbuf_printf(m, "  SIZE: %08x\n", error->plane[i].size);
9520235783Skib		sbuf_printf(m, "  POS: %08x\n", error->plane[i].pos);
9521235783Skib		sbuf_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
9522235783Skib		if (INTEL_INFO(dev)->gen >= 4) {
9523235783Skib			sbuf_printf(m, "  SURF: %08x\n", error->plane[i].surface);
9524235783Skib			sbuf_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
9525235783Skib		}
9526235783Skib
9527235783Skib		sbuf_printf(m, "Cursor [%d]:\n", i);
9528235783Skib		sbuf_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
9529235783Skib		sbuf_printf(m, "  POS: %08x\n", error->cursor[i].position);
9530235783Skib		sbuf_printf(m, "  BASE: %08x\n", error->cursor[i].base);
9531235783Skib	}
9532235783Skib}
9533