1// SPDX-License-Identifier: MIT
2/*
3 * Copyright �� 2023 Intel Corporation
4 */
5
6#include "i915_drv.h"
7#include "i915_reg.h"
8#include "i9xx_wm.h"
9#include "intel_atomic.h"
10#include "intel_display.h"
11#include "intel_display_trace.h"
12#include "intel_mchbar_regs.h"
13#include "intel_wm.h"
14#include "skl_watermark.h"
15#include "vlv_sideband.h"
16
17/* used in computing the new watermarks state */
18struct intel_wm_config {
19	unsigned int num_pipes_active;
20	bool sprites_enabled;
21	bool sprites_scaled;
22};
23
24struct cxsr_latency {
25	bool is_desktop : 1;
26	bool is_ddr3 : 1;
27	u16 fsb_freq;
28	u16 mem_freq;
29	u16 display_sr;
30	u16 display_hpll_disable;
31	u16 cursor_sr;
32	u16 cursor_hpll_disable;
33};
34
35static const struct cxsr_latency cxsr_latency_table[] = {
36	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
37	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
38	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
39	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
40	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */
41
42	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
43	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
44	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
45	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
46	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */
47
48	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
49	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
50	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
51	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
52	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */
53
54	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
55	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
56	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
57	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
58	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */
59
60	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
61	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
62	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
63	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
64	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */
65
66	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
67	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
68	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
69	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
70	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
71};
72
73static const struct cxsr_latency *intel_get_cxsr_latency(struct drm_i915_private *i915)
74{
75	int i;
76
77	if (i915->fsb_freq == 0 || i915->mem_freq == 0)
78		return NULL;
79
80	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
81		const struct cxsr_latency *latency = &cxsr_latency_table[i];
82		bool is_desktop = !IS_MOBILE(i915);
83
84		if (is_desktop == latency->is_desktop &&
85		    i915->is_ddr3 == latency->is_ddr3 &&
86		    i915->fsb_freq == latency->fsb_freq &&
87		    i915->mem_freq == latency->mem_freq)
88			return latency;
89	}
90
91	drm_dbg_kms(&i915->drm, "Unknown FSB/MEM found, disable CxSR\n");
92
93	return NULL;
94}
95
96static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
97{
98	u32 val;
99
100	vlv_punit_get(dev_priv);
101
102	val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
103	if (enable)
104		val &= ~FORCE_DDR_HIGH_FREQ;
105	else
106		val |= FORCE_DDR_HIGH_FREQ;
107	val &= ~FORCE_DDR_LOW_FREQ;
108	val |= FORCE_DDR_FREQ_REQ_ACK;
109	vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
110
111	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
112		      FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
113		drm_err(&dev_priv->drm,
114			"timed out waiting for Punit DDR DVFS request\n");
115
116	vlv_punit_put(dev_priv);
117}
118
119static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
120{
121	u32 val;
122
123	vlv_punit_get(dev_priv);
124
125	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
126	if (enable)
127		val |= DSP_MAXFIFO_PM5_ENABLE;
128	else
129		val &= ~DSP_MAXFIFO_PM5_ENABLE;
130	vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);
131
132	vlv_punit_put(dev_priv);
133}
134
135#define FW_WM(value, plane) \
136	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)
137
138static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
139{
140	bool was_enabled;
141	u32 val;
142
143	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
144		was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
145		intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
146		intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF_VLV);
147	} else if (IS_G4X(dev_priv) || IS_I965GM(dev_priv)) {
148		was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
149		intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
150		intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF);
151	} else if (IS_PINEVIEW(dev_priv)) {
152		val = intel_uncore_read(&dev_priv->uncore, DSPFW3);
153		was_enabled = val & PINEVIEW_SELF_REFRESH_EN;
154		if (enable)
155			val |= PINEVIEW_SELF_REFRESH_EN;
156		else
157			val &= ~PINEVIEW_SELF_REFRESH_EN;
158		intel_uncore_write(&dev_priv->uncore, DSPFW3, val);
159		intel_uncore_posting_read(&dev_priv->uncore, DSPFW3);
160	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) {
161		was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
162		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
163			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
164		intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, val);
165		intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF);
166	} else if (IS_I915GM(dev_priv)) {
167		/*
168		 * FIXME can't find a bit like this for 915G, and
169		 * yet it does have the related watermark in
170		 * FW_BLC_SELF. What's going on?
171		 */
172		was_enabled = intel_uncore_read(&dev_priv->uncore, INSTPM) & INSTPM_SELF_EN;
173		val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
174			       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
175		intel_uncore_write(&dev_priv->uncore, INSTPM, val);
176		intel_uncore_posting_read(&dev_priv->uncore, INSTPM);
177	} else {
178		return false;
179	}
180
181	trace_intel_memory_cxsr(dev_priv, was_enabled, enable);
182
183	drm_dbg_kms(&dev_priv->drm, "memory self-refresh is %s (was %s)\n",
184		    str_enabled_disabled(enable),
185		    str_enabled_disabled(was_enabled));
186
187	return was_enabled;
188}
189
190/**
191 * intel_set_memory_cxsr - Configure CxSR state
192 * @dev_priv: i915 device
193 * @enable: Allow vs. disallow CxSR
194 *
195 * Allow or disallow the system to enter a special CxSR
196 * (C-state self refresh) state. What typically happens in CxSR mode
197 * is that several display FIFOs may get combined into a single larger
198 * FIFO for a particular plane (so called max FIFO mode) to allow the
199 * system to defer memory fetches longer, and the memory will enter
200 * self refresh.
201 *
202 * Note that enabling CxSR does not guarantee that the system enter
203 * this special mode, nor does it guarantee that the system stays
204 * in that mode once entered. So this just allows/disallows the system
205 * to autonomously utilize the CxSR mode. Other factors such as core
206 * C-states will affect when/if the system actually enters/exits the
207 * CxSR mode.
208 *
209 * Note that on VLV/CHV this actually only controls the max FIFO mode,
210 * and the system is free to enter/exit memory self refresh at any time
211 * even when the use of CxSR has been disallowed.
212 *
213 * While the system is actually in the CxSR/max FIFO mode, some plane
214 * control registers will not get latched on vblank. Thus in order to
215 * guarantee the system will respond to changes in the plane registers
216 * we must always disallow CxSR prior to making changes to those registers.
217 * Unfortunately the system will re-evaluate the CxSR conditions at
218 * frame start which happens after vblank start (which is when the plane
219 * registers would get latched), so we can't proceed with the plane update
220 * during the same frame where we disallowed CxSR.
221 *
222 * Certain platforms also have a deeper HPLL SR mode. Fortunately the
223 * HPLL SR mode depends on CxSR itself, so we don't have to hand hold
224 * the hardware w.r.t. HPLL SR when writing to plane registers.
225 * Disallowing just CxSR is sufficient.
226 */
227bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
228{
229	bool ret;
230
231	mutex_lock(&dev_priv->display.wm.wm_mutex);
232	ret = _intel_set_memory_cxsr(dev_priv, enable);
233	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
234		dev_priv->display.wm.vlv.cxsr = enable;
235	else if (IS_G4X(dev_priv))
236		dev_priv->display.wm.g4x.cxsr = enable;
237	mutex_unlock(&dev_priv->display.wm.wm_mutex);
238
239	return ret;
240}
241
242/*
243 * Latency for FIFO fetches is dependent on several factors:
244 *   - memory configuration (speed, channels)
245 *   - chipset
246 *   - current MCH state
247 * It can be fairly high in some situations, so here we assume a fairly
248 * pessimal value.  It's a tradeoff between extra memory fetches (if we
249 * set this value too high, the FIFO will fetch frequently to stay full)
250 * and power consumption (set it too low to save power and we might see
251 * FIFO underruns and display "flicker").
252 *
253 * A value of 5us seems to be a good balance; safe for very low end
254 * platforms but not overly aggressive on lower latency configs.
255 */
256static const int pessimal_latency_ns = 5000;
257
258#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
259	((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))
260
261static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
262{
263	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
264	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
265	struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
266	enum pipe pipe = crtc->pipe;
267	int sprite0_start, sprite1_start;
268	u32 dsparb, dsparb2, dsparb3;
269
270	switch (pipe) {
271	case PIPE_A:
272		dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
273		dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
274		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
275		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
276		break;
277	case PIPE_B:
278		dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
279		dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
280		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
281		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
282		break;
283	case PIPE_C:
284		dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
285		dsparb3 = intel_uncore_read(&dev_priv->uncore, DSPARB3);
286		sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
287		sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
288		break;
289	default:
290		MISSING_CASE(pipe);
291		return;
292	}
293
294	fifo_state->plane[PLANE_PRIMARY] = sprite0_start;
295	fifo_state->plane[PLANE_SPRITE0] = sprite1_start - sprite0_start;
296	fifo_state->plane[PLANE_SPRITE1] = 511 - sprite1_start;
297	fifo_state->plane[PLANE_CURSOR] = 63;
298}
299
300static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv,
301			      enum i9xx_plane_id i9xx_plane)
302{
303	u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
304	int size;
305
306	size = dsparb & 0x7f;
307	if (i9xx_plane == PLANE_B)
308		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
309
310	drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
311		    dsparb, plane_name(i9xx_plane), size);
312
313	return size;
314}
315
316static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
317			      enum i9xx_plane_id i9xx_plane)
318{
319	u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
320	int size;
321
322	size = dsparb & 0x1ff;
323	if (i9xx_plane == PLANE_B)
324		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
325	size >>= 1; /* Convert to cachelines */
326
327	drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
328		    dsparb, plane_name(i9xx_plane), size);
329
330	return size;
331}
332
333static int i845_get_fifo_size(struct drm_i915_private *dev_priv,
334			      enum i9xx_plane_id i9xx_plane)
335{
336	u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
337	int size;
338
339	size = dsparb & 0x7f;
340	size >>= 2; /* Convert to cachelines */
341
342	drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
343		    dsparb, plane_name(i9xx_plane), size);
344
345	return size;
346}
347
348/* Pineview has different values for various configs */
349static const struct intel_watermark_params pnv_display_wm = {
350	.fifo_size = PINEVIEW_DISPLAY_FIFO,
351	.max_wm = PINEVIEW_MAX_WM,
352	.default_wm = PINEVIEW_DFT_WM,
353	.guard_size = PINEVIEW_GUARD_WM,
354	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
355};
356
357static const struct intel_watermark_params pnv_display_hplloff_wm = {
358	.fifo_size = PINEVIEW_DISPLAY_FIFO,
359	.max_wm = PINEVIEW_MAX_WM,
360	.default_wm = PINEVIEW_DFT_HPLLOFF_WM,
361	.guard_size = PINEVIEW_GUARD_WM,
362	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
363};
364
365static const struct intel_watermark_params pnv_cursor_wm = {
366	.fifo_size = PINEVIEW_CURSOR_FIFO,
367	.max_wm = PINEVIEW_CURSOR_MAX_WM,
368	.default_wm = PINEVIEW_CURSOR_DFT_WM,
369	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
370	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
371};
372
373static const struct intel_watermark_params pnv_cursor_hplloff_wm = {
374	.fifo_size = PINEVIEW_CURSOR_FIFO,
375	.max_wm = PINEVIEW_CURSOR_MAX_WM,
376	.default_wm = PINEVIEW_CURSOR_DFT_WM,
377	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
378	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
379};
380
381static const struct intel_watermark_params i965_cursor_wm_info = {
382	.fifo_size = I965_CURSOR_FIFO,
383	.max_wm = I965_CURSOR_MAX_WM,
384	.default_wm = I965_CURSOR_DFT_WM,
385	.guard_size = 2,
386	.cacheline_size = I915_FIFO_LINE_SIZE,
387};
388
389static const struct intel_watermark_params i945_wm_info = {
390	.fifo_size = I945_FIFO_SIZE,
391	.max_wm = I915_MAX_WM,
392	.default_wm = 1,
393	.guard_size = 2,
394	.cacheline_size = I915_FIFO_LINE_SIZE,
395};
396
397static const struct intel_watermark_params i915_wm_info = {
398	.fifo_size = I915_FIFO_SIZE,
399	.max_wm = I915_MAX_WM,
400	.default_wm = 1,
401	.guard_size = 2,
402	.cacheline_size = I915_FIFO_LINE_SIZE,
403};
404
405static const struct intel_watermark_params i830_a_wm_info = {
406	.fifo_size = I855GM_FIFO_SIZE,
407	.max_wm = I915_MAX_WM,
408	.default_wm = 1,
409	.guard_size = 2,
410	.cacheline_size = I830_FIFO_LINE_SIZE,
411};
412
413static const struct intel_watermark_params i830_bc_wm_info = {
414	.fifo_size = I855GM_FIFO_SIZE,
415	.max_wm = I915_MAX_WM / 2,
416	.default_wm = 1,
417	.guard_size = 2,
418	.cacheline_size = I830_FIFO_LINE_SIZE,
419};
420
421static const struct intel_watermark_params i845_wm_info = {
422	.fifo_size = I830_FIFO_SIZE,
423	.max_wm = I915_MAX_WM,
424	.default_wm = 1,
425	.guard_size = 2,
426	.cacheline_size = I830_FIFO_LINE_SIZE,
427};
428
429/**
430 * intel_wm_method1 - Method 1 / "small buffer" watermark formula
431 * @pixel_rate: Pipe pixel rate in kHz
432 * @cpp: Plane bytes per pixel
433 * @latency: Memory wakeup latency in 0.1us units
434 *
435 * Compute the watermark using the method 1 or "small buffer"
436 * formula. The caller may additonally add extra cachelines
437 * to account for TLB misses and clock crossings.
438 *
439 * This method is concerned with the short term drain rate
440 * of the FIFO, ie. it does not account for blanking periods
441 * which would effectively reduce the average drain rate across
442 * a longer period. The name "small" refers to the fact the
443 * FIFO is relatively small compared to the amount of data
444 * fetched.
445 *
446 * The FIFO level vs. time graph might look something like:
447 *
448 *   |\   |\
449 *   | \  | \
450 * __---__---__ (- plane active, _ blanking)
451 * -> time
452 *
453 * or perhaps like this:
454 *
455 *   |\|\  |\|\
456 * __----__----__ (- plane active, _ blanking)
457 * -> time
458 *
459 * Returns:
460 * The watermark in bytes
461 */
462static unsigned int intel_wm_method1(unsigned int pixel_rate,
463				     unsigned int cpp,
464				     unsigned int latency)
465{
466	u64 ret;
467
468	ret = mul_u32_u32(pixel_rate, cpp * latency);
469	ret = DIV_ROUND_UP_ULL(ret, 10000);
470
471	return ret;
472}
473
474/**
475 * intel_wm_method2 - Method 2 / "large buffer" watermark formula
476 * @pixel_rate: Pipe pixel rate in kHz
477 * @htotal: Pipe horizontal total
478 * @width: Plane width in pixels
479 * @cpp: Plane bytes per pixel
480 * @latency: Memory wakeup latency in 0.1us units
481 *
482 * Compute the watermark using the method 2 or "large buffer"
483 * formula. The caller may additonally add extra cachelines
484 * to account for TLB misses and clock crossings.
485 *
486 * This method is concerned with the long term drain rate
487 * of the FIFO, ie. it does account for blanking periods
488 * which effectively reduce the average drain rate across
489 * a longer period. The name "large" refers to the fact the
490 * FIFO is relatively large compared to the amount of data
491 * fetched.
492 *
493 * The FIFO level vs. time graph might look something like:
494 *
495 *    |\___       |\___
496 *    |    \___   |    \___
497 *    |        \  |        \
498 * __ --__--__--__--__--__--__ (- plane active, _ blanking)
499 * -> time
500 *
501 * Returns:
502 * The watermark in bytes
503 */
504static unsigned int intel_wm_method2(unsigned int pixel_rate,
505				     unsigned int htotal,
506				     unsigned int width,
507				     unsigned int cpp,
508				     unsigned int latency)
509{
510	unsigned int ret;
511
512	/*
513	 * FIXME remove once all users are computing
514	 * watermarks in the correct place.
515	 */
516	if (WARN_ON_ONCE(htotal == 0))
517		htotal = 1;
518
519	ret = (latency * pixel_rate) / (htotal * 10000);
520	ret = (ret + 1) * width * cpp;
521
522	return ret;
523}
524
525/**
526 * intel_calculate_wm - calculate watermark level
527 * @i915: the device
528 * @pixel_rate: pixel clock
529 * @wm: chip FIFO params
530 * @fifo_size: size of the FIFO buffer
531 * @cpp: bytes per pixel
532 * @latency_ns: memory latency for the platform
533 *
534 * Calculate the watermark level (the level at which the display plane will
535 * start fetching from memory again).  Each chip has a different display
536 * FIFO size and allocation, so the caller needs to figure that out and pass
537 * in the correct intel_watermark_params structure.
538 *
539 * As the pixel clock runs, the FIFO will be drained at a rate that depends
540 * on the pixel size.  When it reaches the watermark level, it'll start
541 * fetching FIFO line sized based chunks from memory until the FIFO fills
542 * past the watermark point.  If the FIFO drains completely, a FIFO underrun
543 * will occur, and a display engine hang could result.
544 */
545static unsigned int intel_calculate_wm(struct drm_i915_private *i915,
546				       int pixel_rate,
547				       const struct intel_watermark_params *wm,
548				       int fifo_size, int cpp,
549				       unsigned int latency_ns)
550{
551	int entries, wm_size;
552
553	/*
554	 * Note: we need to make sure we don't overflow for various clock &
555	 * latency values.
556	 * clocks go from a few thousand to several hundred thousand.
557	 * latency is usually a few thousand
558	 */
559	entries = intel_wm_method1(pixel_rate, cpp,
560				   latency_ns / 100);
561	entries = DIV_ROUND_UP(entries, wm->cacheline_size) +
562		wm->guard_size;
563	drm_dbg_kms(&i915->drm, "FIFO entries required for mode: %d\n", entries);
564
565	wm_size = fifo_size - entries;
566	drm_dbg_kms(&i915->drm, "FIFO watermark level: %d\n", wm_size);
567
568	/* Don't promote wm_size to unsigned... */
569	if (wm_size > wm->max_wm)
570		wm_size = wm->max_wm;
571	if (wm_size <= 0)
572		wm_size = wm->default_wm;
573
574	/*
575	 * Bspec seems to indicate that the value shouldn't be lower than
576	 * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
577	 * Lets go for 8 which is the burst size since certain platforms
578	 * already use a hardcoded 8 (which is what the spec says should be
579	 * done).
580	 */
581	if (wm_size <= 8)
582		wm_size = 8;
583
584	return wm_size;
585}
586
587static bool is_disabling(int old, int new, int threshold)
588{
589	return old >= threshold && new < threshold;
590}
591
592static bool is_enabling(int old, int new, int threshold)
593{
594	return old < threshold && new >= threshold;
595}
596
597static bool intel_crtc_active(struct intel_crtc *crtc)
598{
599	/* Be paranoid as we can arrive here with only partial
600	 * state retrieved from the hardware during setup.
601	 *
602	 * We can ditch the adjusted_mode.crtc_clock check as soon
603	 * as Haswell has gained clock readout/fastboot support.
604	 *
605	 * We can ditch the crtc->primary->state->fb check as soon as we can
606	 * properly reconstruct framebuffers.
607	 *
608	 * FIXME: The intel_crtc->active here should be switched to
609	 * crtc->state->active once we have proper CRTC states wired up
610	 * for atomic.
611	 */
612	return crtc->active && crtc->base.primary->state->fb &&
613		crtc->config->hw.adjusted_mode.crtc_clock;
614}
615
616static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
617{
618	struct intel_crtc *crtc, *enabled = NULL;
619
620	for_each_intel_crtc(&dev_priv->drm, crtc) {
621		if (intel_crtc_active(crtc)) {
622			if (enabled)
623				return NULL;
624			enabled = crtc;
625		}
626	}
627
628	return enabled;
629}
630
631static void pnv_update_wm(struct drm_i915_private *dev_priv)
632{
633	struct intel_crtc *crtc;
634	const struct cxsr_latency *latency;
635	u32 reg;
636	unsigned int wm;
637
638	latency = intel_get_cxsr_latency(dev_priv);
639	if (!latency) {
640		drm_dbg_kms(&dev_priv->drm,
641			    "Unknown FSB/MEM found, disable CxSR\n");
642		intel_set_memory_cxsr(dev_priv, false);
643		return;
644	}
645
646	crtc = single_enabled_crtc(dev_priv);
647	if (crtc) {
648		const struct drm_framebuffer *fb =
649			crtc->base.primary->state->fb;
650		int pixel_rate = crtc->config->pixel_rate;
651		int cpp = fb->format->cpp[0];
652
653		/* Display SR */
654		wm = intel_calculate_wm(dev_priv, pixel_rate,
655					&pnv_display_wm,
656					pnv_display_wm.fifo_size,
657					cpp, latency->display_sr);
658		reg = intel_uncore_read(&dev_priv->uncore, DSPFW1);
659		reg &= ~DSPFW_SR_MASK;
660		reg |= FW_WM(wm, SR);
661		intel_uncore_write(&dev_priv->uncore, DSPFW1, reg);
662		drm_dbg_kms(&dev_priv->drm, "DSPFW1 register is %x\n", reg);
663
664		/* cursor SR */
665		wm = intel_calculate_wm(dev_priv, pixel_rate,
666					&pnv_cursor_wm,
667					pnv_display_wm.fifo_size,
668					4, latency->cursor_sr);
669		intel_uncore_rmw(&dev_priv->uncore, DSPFW3, DSPFW_CURSOR_SR_MASK,
670				 FW_WM(wm, CURSOR_SR));
671
672		/* Display HPLL off SR */
673		wm = intel_calculate_wm(dev_priv, pixel_rate,
674					&pnv_display_hplloff_wm,
675					pnv_display_hplloff_wm.fifo_size,
676					cpp, latency->display_hpll_disable);
677		intel_uncore_rmw(&dev_priv->uncore, DSPFW3, DSPFW_HPLL_SR_MASK, FW_WM(wm, HPLL_SR));
678
679		/* cursor HPLL off SR */
680		wm = intel_calculate_wm(dev_priv, pixel_rate,
681					&pnv_cursor_hplloff_wm,
682					pnv_display_hplloff_wm.fifo_size,
683					4, latency->cursor_hpll_disable);
684		reg = intel_uncore_read(&dev_priv->uncore, DSPFW3);
685		reg &= ~DSPFW_HPLL_CURSOR_MASK;
686		reg |= FW_WM(wm, HPLL_CURSOR);
687		intel_uncore_write(&dev_priv->uncore, DSPFW3, reg);
688		drm_dbg_kms(&dev_priv->drm, "DSPFW3 register is %x\n", reg);
689
690		intel_set_memory_cxsr(dev_priv, true);
691	} else {
692		intel_set_memory_cxsr(dev_priv, false);
693	}
694}
695
696/*
697 * Documentation says:
698 * "If the line size is small, the TLB fetches can get in the way of the
699 *  data fetches, causing some lag in the pixel data return which is not
700 *  accounted for in the above formulas. The following adjustment only
701 *  needs to be applied if eight whole lines fit in the buffer at once.
702 *  The WM is adjusted upwards by the difference between the FIFO size
703 *  and the size of 8 whole lines. This adjustment is always performed
704 *  in the actual pixel depth regardless of whether FBC is enabled or not."
705 */
706static unsigned int g4x_tlb_miss_wa(int fifo_size, int width, int cpp)
707{
708	int tlb_miss = fifo_size * 64 - width * cpp * 8;
709
710	return max(0, tlb_miss);
711}
712
713static void g4x_write_wm_values(struct drm_i915_private *dev_priv,
714				const struct g4x_wm_values *wm)
715{
716	enum pipe pipe;
717
718	for_each_pipe(dev_priv, pipe)
719		trace_g4x_wm(intel_crtc_for_pipe(dev_priv, pipe), wm);
720
721	intel_uncore_write(&dev_priv->uncore, DSPFW1,
722			   FW_WM(wm->sr.plane, SR) |
723			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
724			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
725			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
726	intel_uncore_write(&dev_priv->uncore, DSPFW2,
727			   (wm->fbc_en ? DSPFW_FBC_SR_EN : 0) |
728			   FW_WM(wm->sr.fbc, FBC_SR) |
729			   FW_WM(wm->hpll.fbc, FBC_HPLL_SR) |
730			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) |
731			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
732			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
733	intel_uncore_write(&dev_priv->uncore, DSPFW3,
734			   (wm->hpll_en ? DSPFW_HPLL_SR_EN : 0) |
735			   FW_WM(wm->sr.cursor, CURSOR_SR) |
736			   FW_WM(wm->hpll.cursor, HPLL_CURSOR) |
737			   FW_WM(wm->hpll.plane, HPLL_SR));
738
739	intel_uncore_posting_read(&dev_priv->uncore, DSPFW1);
740}
741
742#define FW_WM_VLV(value, plane) \
743	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)
744
745static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
746				const struct vlv_wm_values *wm)
747{
748	enum pipe pipe;
749
750	for_each_pipe(dev_priv, pipe) {
751		trace_vlv_wm(intel_crtc_for_pipe(dev_priv, pipe), wm);
752
753		intel_uncore_write(&dev_priv->uncore, VLV_DDL(pipe),
754				   (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) |
755				   (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) |
756				   (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) |
757				   (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT));
758	}
759
760	/*
761	 * Zero the (unused) WM1 watermarks, and also clear all the
762	 * high order bits so that there are no out of bounds values
763	 * present in the registers during the reprogramming.
764	 */
765	intel_uncore_write(&dev_priv->uncore, DSPHOWM, 0);
766	intel_uncore_write(&dev_priv->uncore, DSPHOWM1, 0);
767	intel_uncore_write(&dev_priv->uncore, DSPFW4, 0);
768	intel_uncore_write(&dev_priv->uncore, DSPFW5, 0);
769	intel_uncore_write(&dev_priv->uncore, DSPFW6, 0);
770
771	intel_uncore_write(&dev_priv->uncore, DSPFW1,
772			   FW_WM(wm->sr.plane, SR) |
773			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
774			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
775			   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
776	intel_uncore_write(&dev_priv->uncore, DSPFW2,
777			   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) |
778			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
779			   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
780	intel_uncore_write(&dev_priv->uncore, DSPFW3,
781			   FW_WM(wm->sr.cursor, CURSOR_SR));
782
783	if (IS_CHERRYVIEW(dev_priv)) {
784		intel_uncore_write(&dev_priv->uncore, DSPFW7_CHV,
785				   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
786				   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
787		intel_uncore_write(&dev_priv->uncore, DSPFW8_CHV,
788				   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) |
789				   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE));
790		intel_uncore_write(&dev_priv->uncore, DSPFW9_CHV,
791				   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) |
792				   FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC));
793		intel_uncore_write(&dev_priv->uncore, DSPHOWM,
794				   FW_WM(wm->sr.plane >> 9, SR_HI) |
795				   FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) |
796				   FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) |
797				   FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) |
798				   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
799				   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
800				   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
801				   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
802				   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
803				   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
804	} else {
805		intel_uncore_write(&dev_priv->uncore, DSPFW7,
806				   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
807				   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
808		intel_uncore_write(&dev_priv->uncore, DSPHOWM,
809				   FW_WM(wm->sr.plane >> 9, SR_HI) |
810				   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
811				   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
812				   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
813				   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
814				   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
815				   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
816	}
817
818	intel_uncore_posting_read(&dev_priv->uncore, DSPFW1);
819}
820
821#undef FW_WM_VLV
822
823static void g4x_setup_wm_latency(struct drm_i915_private *dev_priv)
824{
825	/* all latencies in usec */
826	dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5;
827	dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_SR] = 12;
828	dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35;
829
830	dev_priv->display.wm.num_levels = G4X_WM_LEVEL_HPLL + 1;
831}
832
833static int g4x_plane_fifo_size(enum plane_id plane_id, int level)
834{
835	/*
836	 * DSPCNTR[13] supposedly controls whether the
837	 * primary plane can use the FIFO space otherwise
838	 * reserved for the sprite plane. It's not 100% clear
839	 * what the actual FIFO size is, but it looks like we
840	 * can happily set both primary and sprite watermarks
841	 * up to 127 cachelines. So that would seem to mean
842	 * that either DSPCNTR[13] doesn't do anything, or that
843	 * the total FIFO is >= 256 cachelines in size. Either
844	 * way, we don't seem to have to worry about this
845	 * repartitioning as the maximum watermark value the
846	 * register can hold for each plane is lower than the
847	 * minimum FIFO size.
848	 */
849	switch (plane_id) {
850	case PLANE_CURSOR:
851		return 63;
852	case PLANE_PRIMARY:
853		return level == G4X_WM_LEVEL_NORMAL ? 127 : 511;
854	case PLANE_SPRITE0:
855		return level == G4X_WM_LEVEL_NORMAL ? 127 : 0;
856	default:
857		MISSING_CASE(plane_id);
858		return 0;
859	}
860}
861
862static int g4x_fbc_fifo_size(int level)
863{
864	switch (level) {
865	case G4X_WM_LEVEL_SR:
866		return 7;
867	case G4X_WM_LEVEL_HPLL:
868		return 15;
869	default:
870		MISSING_CASE(level);
871		return 0;
872	}
873}
874
875static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
876			  const struct intel_plane_state *plane_state,
877			  int level)
878{
879	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
880	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
881	const struct drm_display_mode *pipe_mode =
882		&crtc_state->hw.pipe_mode;
883	unsigned int latency = dev_priv->display.wm.pri_latency[level] * 10;
884	unsigned int pixel_rate, htotal, cpp, width, wm;
885
886	if (latency == 0)
887		return USHRT_MAX;
888
889	if (!intel_wm_plane_visible(crtc_state, plane_state))
890		return 0;
891
892	cpp = plane_state->hw.fb->format->cpp[0];
893
894	/*
895	 * WaUse32BppForSRWM:ctg,elk
896	 *
897	 * The spec fails to list this restriction for the
898	 * HPLL watermark, which seems a little strange.
899	 * Let's use 32bpp for the HPLL watermark as well.
900	 */
901	if (plane->id == PLANE_PRIMARY &&
902	    level != G4X_WM_LEVEL_NORMAL)
903		cpp = max(cpp, 4u);
904
905	pixel_rate = crtc_state->pixel_rate;
906	htotal = pipe_mode->crtc_htotal;
907	width = drm_rect_width(&plane_state->uapi.src) >> 16;
908
909	if (plane->id == PLANE_CURSOR) {
910		wm = intel_wm_method2(pixel_rate, htotal, width, cpp, latency);
911	} else if (plane->id == PLANE_PRIMARY &&
912		   level == G4X_WM_LEVEL_NORMAL) {
913		wm = intel_wm_method1(pixel_rate, cpp, latency);
914	} else {
915		unsigned int small, large;
916
917		small = intel_wm_method1(pixel_rate, cpp, latency);
918		large = intel_wm_method2(pixel_rate, htotal, width, cpp, latency);
919
920		wm = min(small, large);
921	}
922
923	wm += g4x_tlb_miss_wa(g4x_plane_fifo_size(plane->id, level),
924			      width, cpp);
925
926	wm = DIV_ROUND_UP(wm, 64) + 2;
927
928	return min_t(unsigned int, wm, USHRT_MAX);
929}
930
931static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
932				 int level, enum plane_id plane_id, u16 value)
933{
934	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
935	bool dirty = false;
936
937	for (; level < dev_priv->display.wm.num_levels; level++) {
938		struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
939
940		dirty |= raw->plane[plane_id] != value;
941		raw->plane[plane_id] = value;
942	}
943
944	return dirty;
945}
946
947static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state,
948			       int level, u16 value)
949{
950	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
951	bool dirty = false;
952
953	/* NORMAL level doesn't have an FBC watermark */
954	level = max(level, G4X_WM_LEVEL_SR);
955
956	for (; level < dev_priv->display.wm.num_levels; level++) {
957		struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
958
959		dirty |= raw->fbc != value;
960		raw->fbc = value;
961	}
962
963	return dirty;
964}
965
966static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
967			      const struct intel_plane_state *plane_state,
968			      u32 pri_val);
969
970static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
971				     const struct intel_plane_state *plane_state)
972{
973	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
974	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
975	enum plane_id plane_id = plane->id;
976	bool dirty = false;
977	int level;
978
979	if (!intel_wm_plane_visible(crtc_state, plane_state)) {
980		dirty |= g4x_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
981		if (plane_id == PLANE_PRIMARY)
982			dirty |= g4x_raw_fbc_wm_set(crtc_state, 0, 0);
983		goto out;
984	}
985
986	for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
987		struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
988		int wm, max_wm;
989
990		wm = g4x_compute_wm(crtc_state, plane_state, level);
991		max_wm = g4x_plane_fifo_size(plane_id, level);
992
993		if (wm > max_wm)
994			break;
995
996		dirty |= raw->plane[plane_id] != wm;
997		raw->plane[plane_id] = wm;
998
999		if (plane_id != PLANE_PRIMARY ||
1000		    level == G4X_WM_LEVEL_NORMAL)
1001			continue;
1002
1003		wm = ilk_compute_fbc_wm(crtc_state, plane_state,
1004					raw->plane[plane_id]);
1005		max_wm = g4x_fbc_fifo_size(level);
1006
1007		/*
1008		 * FBC wm is not mandatory as we
1009		 * can always just disable its use.
1010		 */
1011		if (wm > max_wm)
1012			wm = USHRT_MAX;
1013
1014		dirty |= raw->fbc != wm;
1015		raw->fbc = wm;
1016	}
1017
1018	/* mark watermarks as invalid */
1019	dirty |= g4x_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);
1020
1021	if (plane_id == PLANE_PRIMARY)
1022		dirty |= g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX);
1023
1024 out:
1025	if (dirty) {
1026		drm_dbg_kms(&dev_priv->drm,
1027			    "%s watermarks: normal=%d, SR=%d, HPLL=%d\n",
1028			    plane->base.name,
1029			    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_NORMAL].plane[plane_id],
1030			    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].plane[plane_id],
1031			    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].plane[plane_id]);
1032
1033		if (plane_id == PLANE_PRIMARY)
1034			drm_dbg_kms(&dev_priv->drm,
1035				    "FBC watermarks: SR=%d, HPLL=%d\n",
1036				    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].fbc,
1037				    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].fbc);
1038	}
1039
1040	return dirty;
1041}
1042
1043static bool g4x_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
1044				      enum plane_id plane_id, int level)
1045{
1046	const struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
1047
1048	return raw->plane[plane_id] <= g4x_plane_fifo_size(plane_id, level);
1049}
1050
1051static bool g4x_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
1052				     int level)
1053{
1054	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1055
1056	if (level >= dev_priv->display.wm.num_levels)
1057		return false;
1058
1059	return g4x_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
1060		g4x_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
1061		g4x_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
1062}
1063
1064/* mark all levels starting from 'level' as invalid */
1065static void g4x_invalidate_wms(struct intel_crtc *crtc,
1066			       struct g4x_wm_state *wm_state, int level)
1067{
1068	if (level <= G4X_WM_LEVEL_NORMAL) {
1069		enum plane_id plane_id;
1070
1071		for_each_plane_id_on_crtc(crtc, plane_id)
1072			wm_state->wm.plane[plane_id] = USHRT_MAX;
1073	}
1074
1075	if (level <= G4X_WM_LEVEL_SR) {
1076		wm_state->cxsr = false;
1077		wm_state->sr.cursor = USHRT_MAX;
1078		wm_state->sr.plane = USHRT_MAX;
1079		wm_state->sr.fbc = USHRT_MAX;
1080	}
1081
1082	if (level <= G4X_WM_LEVEL_HPLL) {
1083		wm_state->hpll_en = false;
1084		wm_state->hpll.cursor = USHRT_MAX;
1085		wm_state->hpll.plane = USHRT_MAX;
1086		wm_state->hpll.fbc = USHRT_MAX;
1087	}
1088}
1089
1090static bool g4x_compute_fbc_en(const struct g4x_wm_state *wm_state,
1091			       int level)
1092{
1093	if (level < G4X_WM_LEVEL_SR)
1094		return false;
1095
1096	if (level >= G4X_WM_LEVEL_SR &&
1097	    wm_state->sr.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_SR))
1098		return false;
1099
1100	if (level >= G4X_WM_LEVEL_HPLL &&
1101	    wm_state->hpll.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_HPLL))
1102		return false;
1103
1104	return true;
1105}
1106
1107static int _g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
1108{
1109	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1110	struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
1111	u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
1112	const struct g4x_pipe_wm *raw;
1113	enum plane_id plane_id;
1114	int level;
1115
1116	level = G4X_WM_LEVEL_NORMAL;
1117	if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
1118		goto out;
1119
1120	raw = &crtc_state->wm.g4x.raw[level];
1121	for_each_plane_id_on_crtc(crtc, plane_id)
1122		wm_state->wm.plane[plane_id] = raw->plane[plane_id];
1123
1124	level = G4X_WM_LEVEL_SR;
1125	if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
1126		goto out;
1127
1128	raw = &crtc_state->wm.g4x.raw[level];
1129	wm_state->sr.plane = raw->plane[PLANE_PRIMARY];
1130	wm_state->sr.cursor = raw->plane[PLANE_CURSOR];
1131	wm_state->sr.fbc = raw->fbc;
1132
1133	wm_state->cxsr = active_planes == BIT(PLANE_PRIMARY);
1134
1135	level = G4X_WM_LEVEL_HPLL;
1136	if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
1137		goto out;
1138
1139	raw = &crtc_state->wm.g4x.raw[level];
1140	wm_state->hpll.plane = raw->plane[PLANE_PRIMARY];
1141	wm_state->hpll.cursor = raw->plane[PLANE_CURSOR];
1142	wm_state->hpll.fbc = raw->fbc;
1143
1144	wm_state->hpll_en = wm_state->cxsr;
1145
1146	level++;
1147
1148 out:
1149	if (level == G4X_WM_LEVEL_NORMAL)
1150		return -EINVAL;
1151
1152	/* invalidate the higher levels */
1153	g4x_invalidate_wms(crtc, wm_state, level);
1154
1155	/*
1156	 * Determine if the FBC watermark(s) can be used. IF
1157	 * this isn't the case we prefer to disable the FBC
1158	 * watermark(s) rather than disable the SR/HPLL
1159	 * level(s) entirely. 'level-1' is the highest valid
1160	 * level here.
1161	 */
1162	wm_state->fbc_en = g4x_compute_fbc_en(wm_state, level - 1);
1163
1164	return 0;
1165}
1166
1167static int g4x_compute_pipe_wm(struct intel_atomic_state *state,
1168			       struct intel_crtc *crtc)
1169{
1170	struct intel_crtc_state *crtc_state =
1171		intel_atomic_get_new_crtc_state(state, crtc);
1172	const struct intel_plane_state *old_plane_state;
1173	const struct intel_plane_state *new_plane_state;
1174	struct intel_plane *plane;
1175	unsigned int dirty = 0;
1176	int i;
1177
1178	for_each_oldnew_intel_plane_in_state(state, plane,
1179					     old_plane_state,
1180					     new_plane_state, i) {
1181		if (new_plane_state->hw.crtc != &crtc->base &&
1182		    old_plane_state->hw.crtc != &crtc->base)
1183			continue;
1184
1185		if (g4x_raw_plane_wm_compute(crtc_state, new_plane_state))
1186			dirty |= BIT(plane->id);
1187	}
1188
1189	if (!dirty)
1190		return 0;
1191
1192	return _g4x_compute_pipe_wm(crtc_state);
1193}
1194
1195static int g4x_compute_intermediate_wm(struct intel_atomic_state *state,
1196				       struct intel_crtc *crtc)
1197{
1198	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1199	struct intel_crtc_state *new_crtc_state =
1200		intel_atomic_get_new_crtc_state(state, crtc);
1201	const struct intel_crtc_state *old_crtc_state =
1202		intel_atomic_get_old_crtc_state(state, crtc);
1203	struct g4x_wm_state *intermediate = &new_crtc_state->wm.g4x.intermediate;
1204	const struct g4x_wm_state *optimal = &new_crtc_state->wm.g4x.optimal;
1205	const struct g4x_wm_state *active = &old_crtc_state->wm.g4x.optimal;
1206	enum plane_id plane_id;
1207
1208	if (!new_crtc_state->hw.active ||
1209	    intel_crtc_needs_modeset(new_crtc_state)) {
1210		*intermediate = *optimal;
1211
1212		intermediate->cxsr = false;
1213		intermediate->hpll_en = false;
1214		goto out;
1215	}
1216
1217	intermediate->cxsr = optimal->cxsr && active->cxsr &&
1218		!new_crtc_state->disable_cxsr;
1219	intermediate->hpll_en = optimal->hpll_en && active->hpll_en &&
1220		!new_crtc_state->disable_cxsr;
1221	intermediate->fbc_en = optimal->fbc_en && active->fbc_en;
1222
1223	for_each_plane_id_on_crtc(crtc, plane_id) {
1224		intermediate->wm.plane[plane_id] =
1225			max(optimal->wm.plane[plane_id],
1226			    active->wm.plane[plane_id]);
1227
1228		drm_WARN_ON(&dev_priv->drm, intermediate->wm.plane[plane_id] >
1229			    g4x_plane_fifo_size(plane_id, G4X_WM_LEVEL_NORMAL));
1230	}
1231
1232	intermediate->sr.plane = max(optimal->sr.plane,
1233				     active->sr.plane);
1234	intermediate->sr.cursor = max(optimal->sr.cursor,
1235				      active->sr.cursor);
1236	intermediate->sr.fbc = max(optimal->sr.fbc,
1237				   active->sr.fbc);
1238
1239	intermediate->hpll.plane = max(optimal->hpll.plane,
1240				       active->hpll.plane);
1241	intermediate->hpll.cursor = max(optimal->hpll.cursor,
1242					active->hpll.cursor);
1243	intermediate->hpll.fbc = max(optimal->hpll.fbc,
1244				     active->hpll.fbc);
1245
1246	drm_WARN_ON(&dev_priv->drm,
1247		    (intermediate->sr.plane >
1248		     g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_SR) ||
1249		     intermediate->sr.cursor >
1250		     g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_SR)) &&
1251		    intermediate->cxsr);
1252	drm_WARN_ON(&dev_priv->drm,
1253		    (intermediate->sr.plane >
1254		     g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_HPLL) ||
1255		     intermediate->sr.cursor >
1256		     g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_HPLL)) &&
1257		    intermediate->hpll_en);
1258
1259	drm_WARN_ON(&dev_priv->drm,
1260		    intermediate->sr.fbc > g4x_fbc_fifo_size(1) &&
1261		    intermediate->fbc_en && intermediate->cxsr);
1262	drm_WARN_ON(&dev_priv->drm,
1263		    intermediate->hpll.fbc > g4x_fbc_fifo_size(2) &&
1264		    intermediate->fbc_en && intermediate->hpll_en);
1265
1266out:
1267	/*
1268	 * If our intermediate WM are identical to the final WM, then we can
1269	 * omit the post-vblank programming; only update if it's different.
1270	 */
1271	if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
1272		new_crtc_state->wm.need_postvbl_update = true;
1273
1274	return 0;
1275}
1276
1277static void g4x_merge_wm(struct drm_i915_private *dev_priv,
1278			 struct g4x_wm_values *wm)
1279{
1280	struct intel_crtc *crtc;
1281	int num_active_pipes = 0;
1282
1283	wm->cxsr = true;
1284	wm->hpll_en = true;
1285	wm->fbc_en = true;
1286
1287	for_each_intel_crtc(&dev_priv->drm, crtc) {
1288		const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
1289
1290		if (!crtc->active)
1291			continue;
1292
1293		if (!wm_state->cxsr)
1294			wm->cxsr = false;
1295		if (!wm_state->hpll_en)
1296			wm->hpll_en = false;
1297		if (!wm_state->fbc_en)
1298			wm->fbc_en = false;
1299
1300		num_active_pipes++;
1301	}
1302
1303	if (num_active_pipes != 1) {
1304		wm->cxsr = false;
1305		wm->hpll_en = false;
1306		wm->fbc_en = false;
1307	}
1308
1309	for_each_intel_crtc(&dev_priv->drm, crtc) {
1310		const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
1311		enum pipe pipe = crtc->pipe;
1312
1313		wm->pipe[pipe] = wm_state->wm;
1314		if (crtc->active && wm->cxsr)
1315			wm->sr = wm_state->sr;
1316		if (crtc->active && wm->hpll_en)
1317			wm->hpll = wm_state->hpll;
1318	}
1319}
1320
1321static void g4x_program_watermarks(struct drm_i915_private *dev_priv)
1322{
1323	struct g4x_wm_values *old_wm = &dev_priv->display.wm.g4x;
1324	struct g4x_wm_values new_wm = {};
1325
1326	g4x_merge_wm(dev_priv, &new_wm);
1327
1328	if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
1329		return;
1330
1331	if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
1332		_intel_set_memory_cxsr(dev_priv, false);
1333
1334	g4x_write_wm_values(dev_priv, &new_wm);
1335
1336	if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
1337		_intel_set_memory_cxsr(dev_priv, true);
1338
1339	*old_wm = new_wm;
1340}
1341
1342static void g4x_initial_watermarks(struct intel_atomic_state *state,
1343				   struct intel_crtc *crtc)
1344{
1345	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1346	const struct intel_crtc_state *crtc_state =
1347		intel_atomic_get_new_crtc_state(state, crtc);
1348
1349	mutex_lock(&dev_priv->display.wm.wm_mutex);
1350	crtc->wm.active.g4x = crtc_state->wm.g4x.intermediate;
1351	g4x_program_watermarks(dev_priv);
1352	mutex_unlock(&dev_priv->display.wm.wm_mutex);
1353}
1354
1355static void g4x_optimize_watermarks(struct intel_atomic_state *state,
1356				    struct intel_crtc *crtc)
1357{
1358	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1359	const struct intel_crtc_state *crtc_state =
1360		intel_atomic_get_new_crtc_state(state, crtc);
1361
1362	if (!crtc_state->wm.need_postvbl_update)
1363		return;
1364
1365	mutex_lock(&dev_priv->display.wm.wm_mutex);
1366	crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
1367	g4x_program_watermarks(dev_priv);
1368	mutex_unlock(&dev_priv->display.wm.wm_mutex);
1369}
1370
1371/* latency must be in 0.1us units. */
1372static unsigned int vlv_wm_method2(unsigned int pixel_rate,
1373				   unsigned int htotal,
1374				   unsigned int width,
1375				   unsigned int cpp,
1376				   unsigned int latency)
1377{
1378	unsigned int ret;
1379
1380	ret = intel_wm_method2(pixel_rate, htotal,
1381			       width, cpp, latency);
1382	ret = DIV_ROUND_UP(ret, 64);
1383
1384	return ret;
1385}
1386
1387static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
1388{
1389	/* all latencies in usec */
1390	dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
1391
1392	dev_priv->display.wm.num_levels = VLV_WM_LEVEL_PM2 + 1;
1393
1394	if (IS_CHERRYVIEW(dev_priv)) {
1395		dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
1396		dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
1397
1398		dev_priv->display.wm.num_levels = VLV_WM_LEVEL_DDR_DVFS + 1;
1399	}
1400}
1401
1402static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
1403				const struct intel_plane_state *plane_state,
1404				int level)
1405{
1406	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
1407	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1408	const struct drm_display_mode *pipe_mode =
1409		&crtc_state->hw.pipe_mode;
1410	unsigned int pixel_rate, htotal, cpp, width, wm;
1411
1412	if (dev_priv->display.wm.pri_latency[level] == 0)
1413		return USHRT_MAX;
1414
1415	if (!intel_wm_plane_visible(crtc_state, plane_state))
1416		return 0;
1417
1418	cpp = plane_state->hw.fb->format->cpp[0];
1419	pixel_rate = crtc_state->pixel_rate;
1420	htotal = pipe_mode->crtc_htotal;
1421	width = drm_rect_width(&plane_state->uapi.src) >> 16;
1422
1423	if (plane->id == PLANE_CURSOR) {
1424		/*
1425		 * FIXME the formula gives values that are
1426		 * too big for the cursor FIFO, and hence we
1427		 * would never be able to use cursors. For
1428		 * now just hardcode the watermark.
1429		 */
1430		wm = 63;
1431	} else {
1432		wm = vlv_wm_method2(pixel_rate, htotal, width, cpp,
1433				    dev_priv->display.wm.pri_latency[level] * 10);
1434	}
1435
1436	return min_t(unsigned int, wm, USHRT_MAX);
1437}
1438
1439static bool vlv_need_sprite0_fifo_workaround(unsigned int active_planes)
1440{
1441	return (active_planes & (BIT(PLANE_SPRITE0) |
1442				 BIT(PLANE_SPRITE1))) == BIT(PLANE_SPRITE1);
1443}
1444
1445static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
1446{
1447	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1448	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1449	const struct g4x_pipe_wm *raw =
1450		&crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2];
1451	struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
1452	u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
1453	int num_active_planes = hweight8(active_planes);
1454	const int fifo_size = 511;
1455	int fifo_extra, fifo_left = fifo_size;
1456	int sprite0_fifo_extra = 0;
1457	unsigned int total_rate;
1458	enum plane_id plane_id;
1459
1460	/*
1461	 * When enabling sprite0 after sprite1 has already been enabled
1462	 * we tend to get an underrun unless sprite0 already has some
1463	 * FIFO space allcoated. Hence we always allocate at least one
1464	 * cacheline for sprite0 whenever sprite1 is enabled.
1465	 *
1466	 * All other plane enable sequences appear immune to this problem.
1467	 */
1468	if (vlv_need_sprite0_fifo_workaround(active_planes))
1469		sprite0_fifo_extra = 1;
1470
1471	total_rate = raw->plane[PLANE_PRIMARY] +
1472		raw->plane[PLANE_SPRITE0] +
1473		raw->plane[PLANE_SPRITE1] +
1474		sprite0_fifo_extra;
1475
1476	if (total_rate > fifo_size)
1477		return -EINVAL;
1478
1479	if (total_rate == 0)
1480		total_rate = 1;
1481
1482	for_each_plane_id_on_crtc(crtc, plane_id) {
1483		unsigned int rate;
1484
1485		if ((active_planes & BIT(plane_id)) == 0) {
1486			fifo_state->plane[plane_id] = 0;
1487			continue;
1488		}
1489
1490		rate = raw->plane[plane_id];
1491		fifo_state->plane[plane_id] = fifo_size * rate / total_rate;
1492		fifo_left -= fifo_state->plane[plane_id];
1493	}
1494
1495	fifo_state->plane[PLANE_SPRITE0] += sprite0_fifo_extra;
1496	fifo_left -= sprite0_fifo_extra;
1497
1498	fifo_state->plane[PLANE_CURSOR] = 63;
1499
1500	fifo_extra = DIV_ROUND_UP(fifo_left, num_active_planes ?: 1);
1501
1502	/* spread the remainder evenly */
1503	for_each_plane_id_on_crtc(crtc, plane_id) {
1504		int plane_extra;
1505
1506		if (fifo_left == 0)
1507			break;
1508
1509		if ((active_planes & BIT(plane_id)) == 0)
1510			continue;
1511
1512		plane_extra = min(fifo_extra, fifo_left);
1513		fifo_state->plane[plane_id] += plane_extra;
1514		fifo_left -= plane_extra;
1515	}
1516
1517	drm_WARN_ON(&dev_priv->drm, active_planes != 0 && fifo_left != 0);
1518
1519	/* give it all to the first plane if none are active */
1520	if (active_planes == 0) {
1521		drm_WARN_ON(&dev_priv->drm, fifo_left != fifo_size);
1522		fifo_state->plane[PLANE_PRIMARY] = fifo_left;
1523	}
1524
1525	return 0;
1526}
1527
1528/* mark all levels starting from 'level' as invalid */
1529static void vlv_invalidate_wms(struct intel_crtc *crtc,
1530			       struct vlv_wm_state *wm_state, int level)
1531{
1532	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1533
1534	for (; level < dev_priv->display.wm.num_levels; level++) {
1535		enum plane_id plane_id;
1536
1537		for_each_plane_id_on_crtc(crtc, plane_id)
1538			wm_state->wm[level].plane[plane_id] = USHRT_MAX;
1539
1540		wm_state->sr[level].cursor = USHRT_MAX;
1541		wm_state->sr[level].plane = USHRT_MAX;
1542	}
1543}
1544
1545static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size)
1546{
1547	if (wm > fifo_size)
1548		return USHRT_MAX;
1549	else
1550		return fifo_size - wm;
1551}
1552
1553/*
1554 * Starting from 'level' set all higher
1555 * levels to 'value' in the "raw" watermarks.
1556 */
1557static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
1558				 int level, enum plane_id plane_id, u16 value)
1559{
1560	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1561	bool dirty = false;
1562
1563	for (; level < dev_priv->display.wm.num_levels; level++) {
1564		struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
1565
1566		dirty |= raw->plane[plane_id] != value;
1567		raw->plane[plane_id] = value;
1568	}
1569
1570	return dirty;
1571}
1572
1573static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
1574				     const struct intel_plane_state *plane_state)
1575{
1576	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
1577	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1578	enum plane_id plane_id = plane->id;
1579	int level;
1580	bool dirty = false;
1581
1582	if (!intel_wm_plane_visible(crtc_state, plane_state)) {
1583		dirty |= vlv_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
1584		goto out;
1585	}
1586
1587	for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
1588		struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
1589		int wm = vlv_compute_wm_level(crtc_state, plane_state, level);
1590		int max_wm = plane_id == PLANE_CURSOR ? 63 : 511;
1591
1592		if (wm > max_wm)
1593			break;
1594
1595		dirty |= raw->plane[plane_id] != wm;
1596		raw->plane[plane_id] = wm;
1597	}
1598
1599	/* mark all higher levels as invalid */
1600	dirty |= vlv_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);
1601
1602out:
1603	if (dirty)
1604		drm_dbg_kms(&dev_priv->drm,
1605			    "%s watermarks: PM2=%d, PM5=%d, DDR DVFS=%d\n",
1606			    plane->base.name,
1607			    crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2].plane[plane_id],
1608			    crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM5].plane[plane_id],
1609			    crtc_state->wm.vlv.raw[VLV_WM_LEVEL_DDR_DVFS].plane[plane_id]);
1610
1611	return dirty;
1612}
1613
1614static bool vlv_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
1615				      enum plane_id plane_id, int level)
1616{
1617	const struct g4x_pipe_wm *raw =
1618		&crtc_state->wm.vlv.raw[level];
1619	const struct vlv_fifo_state *fifo_state =
1620		&crtc_state->wm.vlv.fifo_state;
1621
1622	return raw->plane[plane_id] <= fifo_state->plane[plane_id];
1623}
1624
1625static bool vlv_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state, int level)
1626{
1627	return vlv_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
1628		vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
1629		vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE1, level) &&
1630		vlv_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
1631}
1632
1633static int _vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
1634{
1635	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1636	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1637	struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
1638	const struct vlv_fifo_state *fifo_state =
1639		&crtc_state->wm.vlv.fifo_state;
1640	u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
1641	int num_active_planes = hweight8(active_planes);
1642	enum plane_id plane_id;
1643	int level;
1644
1645	/* initially allow all levels */
1646	wm_state->num_levels = dev_priv->display.wm.num_levels;
1647	/*
1648	 * Note that enabling cxsr with no primary/sprite planes
1649	 * enabled can wedge the pipe. Hence we only allow cxsr
1650	 * with exactly one enabled primary/sprite plane.
1651	 */
1652	wm_state->cxsr = crtc->pipe != PIPE_C && num_active_planes == 1;
1653
1654	for (level = 0; level < wm_state->num_levels; level++) {
1655		const struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
1656		const int sr_fifo_size = INTEL_NUM_PIPES(dev_priv) * 512 - 1;
1657
1658		if (!vlv_raw_crtc_wm_is_valid(crtc_state, level))
1659			break;
1660
1661		for_each_plane_id_on_crtc(crtc, plane_id) {
1662			wm_state->wm[level].plane[plane_id] =
1663				vlv_invert_wm_value(raw->plane[plane_id],
1664						    fifo_state->plane[plane_id]);
1665		}
1666
1667		wm_state->sr[level].plane =
1668			vlv_invert_wm_value(max3(raw->plane[PLANE_PRIMARY],
1669						 raw->plane[PLANE_SPRITE0],
1670						 raw->plane[PLANE_SPRITE1]),
1671					    sr_fifo_size);
1672
1673		wm_state->sr[level].cursor =
1674			vlv_invert_wm_value(raw->plane[PLANE_CURSOR],
1675					    63);
1676	}
1677
1678	if (level == 0)
1679		return -EINVAL;
1680
1681	/* limit to only levels we can actually handle */
1682	wm_state->num_levels = level;
1683
1684	/* invalidate the higher levels */
1685	vlv_invalidate_wms(crtc, wm_state, level);
1686
1687	return 0;
1688}
1689
1690static int vlv_compute_pipe_wm(struct intel_atomic_state *state,
1691			       struct intel_crtc *crtc)
1692{
1693	struct intel_crtc_state *crtc_state =
1694		intel_atomic_get_new_crtc_state(state, crtc);
1695	const struct intel_plane_state *old_plane_state;
1696	const struct intel_plane_state *new_plane_state;
1697	struct intel_plane *plane;
1698	unsigned int dirty = 0;
1699	int i;
1700
1701	for_each_oldnew_intel_plane_in_state(state, plane,
1702					     old_plane_state,
1703					     new_plane_state, i) {
1704		if (new_plane_state->hw.crtc != &crtc->base &&
1705		    old_plane_state->hw.crtc != &crtc->base)
1706			continue;
1707
1708		if (vlv_raw_plane_wm_compute(crtc_state, new_plane_state))
1709			dirty |= BIT(plane->id);
1710	}
1711
1712	/*
1713	 * DSPARB registers may have been reset due to the
1714	 * power well being turned off. Make sure we restore
1715	 * them to a consistent state even if no primary/sprite
1716	 * planes are initially active. We also force a FIFO
1717	 * recomputation so that we are sure to sanitize the
1718	 * FIFO setting we took over from the BIOS even if there
1719	 * are no active planes on the crtc.
1720	 */
1721	if (intel_crtc_needs_modeset(crtc_state))
1722		dirty = ~0;
1723
1724	if (!dirty)
1725		return 0;
1726
1727	/* cursor changes don't warrant a FIFO recompute */
1728	if (dirty & ~BIT(PLANE_CURSOR)) {
1729		const struct intel_crtc_state *old_crtc_state =
1730			intel_atomic_get_old_crtc_state(state, crtc);
1731		const struct vlv_fifo_state *old_fifo_state =
1732			&old_crtc_state->wm.vlv.fifo_state;
1733		const struct vlv_fifo_state *new_fifo_state =
1734			&crtc_state->wm.vlv.fifo_state;
1735		int ret;
1736
1737		ret = vlv_compute_fifo(crtc_state);
1738		if (ret)
1739			return ret;
1740
1741		if (intel_crtc_needs_modeset(crtc_state) ||
1742		    memcmp(old_fifo_state, new_fifo_state,
1743			   sizeof(*new_fifo_state)) != 0)
1744			crtc_state->fifo_changed = true;
1745	}
1746
1747	return _vlv_compute_pipe_wm(crtc_state);
1748}
1749
1750#define VLV_FIFO(plane, value) \
1751	(((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)
1752
1753static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
1754				   struct intel_crtc *crtc)
1755{
1756	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1757	struct intel_uncore *uncore = &dev_priv->uncore;
1758	const struct intel_crtc_state *crtc_state =
1759		intel_atomic_get_new_crtc_state(state, crtc);
1760	const struct vlv_fifo_state *fifo_state =
1761		&crtc_state->wm.vlv.fifo_state;
1762	int sprite0_start, sprite1_start, fifo_size;
1763	u32 dsparb, dsparb2, dsparb3;
1764
1765	if (!crtc_state->fifo_changed)
1766		return;
1767
1768	sprite0_start = fifo_state->plane[PLANE_PRIMARY];
1769	sprite1_start = fifo_state->plane[PLANE_SPRITE0] + sprite0_start;
1770	fifo_size = fifo_state->plane[PLANE_SPRITE1] + sprite1_start;
1771
1772	drm_WARN_ON(&dev_priv->drm, fifo_state->plane[PLANE_CURSOR] != 63);
1773	drm_WARN_ON(&dev_priv->drm, fifo_size != 511);
1774
1775	trace_vlv_fifo_size(crtc, sprite0_start, sprite1_start, fifo_size);
1776
1777	/*
1778	 * uncore.lock serves a double purpose here. It allows us to
1779	 * use the less expensive I915_{READ,WRITE}_FW() functions, and
1780	 * it protects the DSPARB registers from getting clobbered by
1781	 * parallel updates from multiple pipes.
1782	 *
1783	 * intel_pipe_update_start() has already disabled interrupts
1784	 * for us, so a plain spin_lock() is sufficient here.
1785	 */
1786	spin_lock(&uncore->lock);
1787
1788	switch (crtc->pipe) {
1789	case PIPE_A:
1790		dsparb = intel_uncore_read_fw(uncore, DSPARB);
1791		dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
1792
1793		dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
1794			    VLV_FIFO(SPRITEB, 0xff));
1795		dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) |
1796			   VLV_FIFO(SPRITEB, sprite1_start));
1797
1798		dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) |
1799			     VLV_FIFO(SPRITEB_HI, 0x1));
1800		dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
1801			   VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));
1802
1803		intel_uncore_write_fw(uncore, DSPARB, dsparb);
1804		intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
1805		break;
1806	case PIPE_B:
1807		dsparb = intel_uncore_read_fw(uncore, DSPARB);
1808		dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
1809
1810		dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
1811			    VLV_FIFO(SPRITED, 0xff));
1812		dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) |
1813			   VLV_FIFO(SPRITED, sprite1_start));
1814
1815		dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) |
1816			     VLV_FIFO(SPRITED_HI, 0xff));
1817		dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
1818			   VLV_FIFO(SPRITED_HI, sprite1_start >> 8));
1819
1820		intel_uncore_write_fw(uncore, DSPARB, dsparb);
1821		intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
1822		break;
1823	case PIPE_C:
1824		dsparb3 = intel_uncore_read_fw(uncore, DSPARB3);
1825		dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
1826
1827		dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
1828			     VLV_FIFO(SPRITEF, 0xff));
1829		dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) |
1830			    VLV_FIFO(SPRITEF, sprite1_start));
1831
1832		dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) |
1833			     VLV_FIFO(SPRITEF_HI, 0xff));
1834		dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
1835			   VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));
1836
1837		intel_uncore_write_fw(uncore, DSPARB3, dsparb3);
1838		intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
1839		break;
1840	default:
1841		break;
1842	}
1843
1844	intel_uncore_posting_read_fw(uncore, DSPARB);
1845
1846	spin_unlock(&uncore->lock);
1847}
1848
1849#undef VLV_FIFO
1850
1851static int vlv_compute_intermediate_wm(struct intel_atomic_state *state,
1852				       struct intel_crtc *crtc)
1853{
1854	struct intel_crtc_state *new_crtc_state =
1855		intel_atomic_get_new_crtc_state(state, crtc);
1856	const struct intel_crtc_state *old_crtc_state =
1857		intel_atomic_get_old_crtc_state(state, crtc);
1858	struct vlv_wm_state *intermediate = &new_crtc_state->wm.vlv.intermediate;
1859	const struct vlv_wm_state *optimal = &new_crtc_state->wm.vlv.optimal;
1860	const struct vlv_wm_state *active = &old_crtc_state->wm.vlv.optimal;
1861	int level;
1862
1863	if (!new_crtc_state->hw.active ||
1864	    intel_crtc_needs_modeset(new_crtc_state)) {
1865		*intermediate = *optimal;
1866
1867		intermediate->cxsr = false;
1868		goto out;
1869	}
1870
1871	intermediate->num_levels = min(optimal->num_levels, active->num_levels);
1872	intermediate->cxsr = optimal->cxsr && active->cxsr &&
1873		!new_crtc_state->disable_cxsr;
1874
1875	for (level = 0; level < intermediate->num_levels; level++) {
1876		enum plane_id plane_id;
1877
1878		for_each_plane_id_on_crtc(crtc, plane_id) {
1879			intermediate->wm[level].plane[plane_id] =
1880				min(optimal->wm[level].plane[plane_id],
1881				    active->wm[level].plane[plane_id]);
1882		}
1883
1884		intermediate->sr[level].plane = min(optimal->sr[level].plane,
1885						    active->sr[level].plane);
1886		intermediate->sr[level].cursor = min(optimal->sr[level].cursor,
1887						     active->sr[level].cursor);
1888	}
1889
1890	vlv_invalidate_wms(crtc, intermediate, level);
1891
1892out:
1893	/*
1894	 * If our intermediate WM are identical to the final WM, then we can
1895	 * omit the post-vblank programming; only update if it's different.
1896	 */
1897	if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
1898		new_crtc_state->wm.need_postvbl_update = true;
1899
1900	return 0;
1901}
1902
1903static void vlv_merge_wm(struct drm_i915_private *dev_priv,
1904			 struct vlv_wm_values *wm)
1905{
1906	struct intel_crtc *crtc;
1907	int num_active_pipes = 0;
1908
1909	wm->level = dev_priv->display.wm.num_levels - 1;
1910	wm->cxsr = true;
1911
1912	for_each_intel_crtc(&dev_priv->drm, crtc) {
1913		const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
1914
1915		if (!crtc->active)
1916			continue;
1917
1918		if (!wm_state->cxsr)
1919			wm->cxsr = false;
1920
1921		num_active_pipes++;
1922		wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
1923	}
1924
1925	if (num_active_pipes != 1)
1926		wm->cxsr = false;
1927
1928	if (num_active_pipes > 1)
1929		wm->level = VLV_WM_LEVEL_PM2;
1930
1931	for_each_intel_crtc(&dev_priv->drm, crtc) {
1932		const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
1933		enum pipe pipe = crtc->pipe;
1934
1935		wm->pipe[pipe] = wm_state->wm[wm->level];
1936		if (crtc->active && wm->cxsr)
1937			wm->sr = wm_state->sr[wm->level];
1938
1939		wm->ddl[pipe].plane[PLANE_PRIMARY] = DDL_PRECISION_HIGH | 2;
1940		wm->ddl[pipe].plane[PLANE_SPRITE0] = DDL_PRECISION_HIGH | 2;
1941		wm->ddl[pipe].plane[PLANE_SPRITE1] = DDL_PRECISION_HIGH | 2;
1942		wm->ddl[pipe].plane[PLANE_CURSOR] = DDL_PRECISION_HIGH | 2;
1943	}
1944}
1945
1946static void vlv_program_watermarks(struct drm_i915_private *dev_priv)
1947{
1948	struct vlv_wm_values *old_wm = &dev_priv->display.wm.vlv;
1949	struct vlv_wm_values new_wm = {};
1950
1951	vlv_merge_wm(dev_priv, &new_wm);
1952
1953	if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
1954		return;
1955
1956	if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
1957		chv_set_memory_dvfs(dev_priv, false);
1958
1959	if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
1960		chv_set_memory_pm5(dev_priv, false);
1961
1962	if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
1963		_intel_set_memory_cxsr(dev_priv, false);
1964
1965	vlv_write_wm_values(dev_priv, &new_wm);
1966
1967	if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
1968		_intel_set_memory_cxsr(dev_priv, true);
1969
1970	if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
1971		chv_set_memory_pm5(dev_priv, true);
1972
1973	if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
1974		chv_set_memory_dvfs(dev_priv, true);
1975
1976	*old_wm = new_wm;
1977}
1978
1979static void vlv_initial_watermarks(struct intel_atomic_state *state,
1980				   struct intel_crtc *crtc)
1981{
1982	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1983	const struct intel_crtc_state *crtc_state =
1984		intel_atomic_get_new_crtc_state(state, crtc);
1985
1986	mutex_lock(&dev_priv->display.wm.wm_mutex);
1987	crtc->wm.active.vlv = crtc_state->wm.vlv.intermediate;
1988	vlv_program_watermarks(dev_priv);
1989	mutex_unlock(&dev_priv->display.wm.wm_mutex);
1990}
1991
1992static void vlv_optimize_watermarks(struct intel_atomic_state *state,
1993				    struct intel_crtc *crtc)
1994{
1995	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1996	const struct intel_crtc_state *crtc_state =
1997		intel_atomic_get_new_crtc_state(state, crtc);
1998
1999	if (!crtc_state->wm.need_postvbl_update)
2000		return;
2001
2002	mutex_lock(&dev_priv->display.wm.wm_mutex);
2003	crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
2004	vlv_program_watermarks(dev_priv);
2005	mutex_unlock(&dev_priv->display.wm.wm_mutex);
2006}
2007
2008static void i965_update_wm(struct drm_i915_private *dev_priv)
2009{
2010	struct intel_crtc *crtc;
2011	int srwm = 1;
2012	int cursor_sr = 16;
2013	bool cxsr_enabled;
2014
2015	/* Calc sr entries for one plane configs */
2016	crtc = single_enabled_crtc(dev_priv);
2017	if (crtc) {
2018		/* self-refresh has much higher latency */
2019		static const int sr_latency_ns = 12000;
2020		const struct drm_display_mode *pipe_mode =
2021			&crtc->config->hw.pipe_mode;
2022		const struct drm_framebuffer *fb =
2023			crtc->base.primary->state->fb;
2024		int pixel_rate = crtc->config->pixel_rate;
2025		int htotal = pipe_mode->crtc_htotal;
2026		int width = drm_rect_width(&crtc->base.primary->state->src) >> 16;
2027		int cpp = fb->format->cpp[0];
2028		int entries;
2029
2030		entries = intel_wm_method2(pixel_rate, htotal,
2031					   width, cpp, sr_latency_ns / 100);
2032		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
2033		srwm = I965_FIFO_SIZE - entries;
2034		if (srwm < 0)
2035			srwm = 1;
2036		srwm &= 0x1ff;
2037		drm_dbg_kms(&dev_priv->drm,
2038			    "self-refresh entries: %d, wm: %d\n",
2039			    entries, srwm);
2040
2041		entries = intel_wm_method2(pixel_rate, htotal,
2042					   crtc->base.cursor->state->crtc_w, 4,
2043					   sr_latency_ns / 100);
2044		entries = DIV_ROUND_UP(entries,
2045				       i965_cursor_wm_info.cacheline_size) +
2046			i965_cursor_wm_info.guard_size;
2047
2048		cursor_sr = i965_cursor_wm_info.fifo_size - entries;
2049		if (cursor_sr > i965_cursor_wm_info.max_wm)
2050			cursor_sr = i965_cursor_wm_info.max_wm;
2051
2052		drm_dbg_kms(&dev_priv->drm,
2053			    "self-refresh watermark: display plane %d "
2054			    "cursor %d\n", srwm, cursor_sr);
2055
2056		cxsr_enabled = true;
2057	} else {
2058		cxsr_enabled = false;
2059		/* Turn off self refresh if both pipes are enabled */
2060		intel_set_memory_cxsr(dev_priv, false);
2061	}
2062
2063	drm_dbg_kms(&dev_priv->drm,
2064		    "Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
2065		    srwm);
2066
2067	/* 965 has limitations... */
2068	intel_uncore_write(&dev_priv->uncore, DSPFW1, FW_WM(srwm, SR) |
2069		   FW_WM(8, CURSORB) |
2070		   FW_WM(8, PLANEB) |
2071		   FW_WM(8, PLANEA));
2072	intel_uncore_write(&dev_priv->uncore, DSPFW2, FW_WM(8, CURSORA) |
2073		   FW_WM(8, PLANEC_OLD));
2074	/* update cursor SR watermark */
2075	intel_uncore_write(&dev_priv->uncore, DSPFW3, FW_WM(cursor_sr, CURSOR_SR));
2076
2077	if (cxsr_enabled)
2078		intel_set_memory_cxsr(dev_priv, true);
2079}
2080
2081#undef FW_WM
2082
2083static struct intel_crtc *intel_crtc_for_plane(struct drm_i915_private *i915,
2084					       enum i9xx_plane_id i9xx_plane)
2085{
2086	struct intel_plane *plane;
2087
2088	for_each_intel_plane(&i915->drm, plane) {
2089		if (plane->id == PLANE_PRIMARY &&
2090		    plane->i9xx_plane == i9xx_plane)
2091			return intel_crtc_for_pipe(i915, plane->pipe);
2092	}
2093
2094	return NULL;
2095}
2096
2097static void i9xx_update_wm(struct drm_i915_private *dev_priv)
2098{
2099	const struct intel_watermark_params *wm_info;
2100	u32 fwater_lo;
2101	u32 fwater_hi;
2102	int cwm, srwm = 1;
2103	int fifo_size;
2104	int planea_wm, planeb_wm;
2105	struct intel_crtc *crtc;
2106
2107	if (IS_I945GM(dev_priv))
2108		wm_info = &i945_wm_info;
2109	else if (DISPLAY_VER(dev_priv) != 2)
2110		wm_info = &i915_wm_info;
2111	else
2112		wm_info = &i830_a_wm_info;
2113
2114	if (DISPLAY_VER(dev_priv) == 2)
2115		fifo_size = i830_get_fifo_size(dev_priv, PLANE_A);
2116	else
2117		fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_A);
2118	crtc = intel_crtc_for_plane(dev_priv, PLANE_A);
2119	if (intel_crtc_active(crtc)) {
2120		const struct drm_framebuffer *fb =
2121			crtc->base.primary->state->fb;
2122		int cpp;
2123
2124		if (DISPLAY_VER(dev_priv) == 2)
2125			cpp = 4;
2126		else
2127			cpp = fb->format->cpp[0];
2128
2129		planea_wm = intel_calculate_wm(dev_priv, crtc->config->pixel_rate,
2130					       wm_info, fifo_size, cpp,
2131					       pessimal_latency_ns);
2132	} else {
2133		planea_wm = fifo_size - wm_info->guard_size;
2134		if (planea_wm > (long)wm_info->max_wm)
2135			planea_wm = wm_info->max_wm;
2136	}
2137
2138	if (DISPLAY_VER(dev_priv) == 2)
2139		wm_info = &i830_bc_wm_info;
2140
2141	if (DISPLAY_VER(dev_priv) == 2)
2142		fifo_size = i830_get_fifo_size(dev_priv, PLANE_B);
2143	else
2144		fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_B);
2145	crtc = intel_crtc_for_plane(dev_priv, PLANE_B);
2146	if (intel_crtc_active(crtc)) {
2147		const struct drm_framebuffer *fb =
2148			crtc->base.primary->state->fb;
2149		int cpp;
2150
2151		if (DISPLAY_VER(dev_priv) == 2)
2152			cpp = 4;
2153		else
2154			cpp = fb->format->cpp[0];
2155
2156		planeb_wm = intel_calculate_wm(dev_priv, crtc->config->pixel_rate,
2157					       wm_info, fifo_size, cpp,
2158					       pessimal_latency_ns);
2159	} else {
2160		planeb_wm = fifo_size - wm_info->guard_size;
2161		if (planeb_wm > (long)wm_info->max_wm)
2162			planeb_wm = wm_info->max_wm;
2163	}
2164
2165	drm_dbg_kms(&dev_priv->drm,
2166		    "FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
2167
2168	crtc = single_enabled_crtc(dev_priv);
2169	if (IS_I915GM(dev_priv) && crtc) {
2170		struct drm_i915_gem_object *obj;
2171
2172		obj = intel_fb_obj(crtc->base.primary->state->fb);
2173
2174		/* self-refresh seems busted with untiled */
2175		if (!i915_gem_object_is_tiled(obj))
2176			crtc = NULL;
2177	}
2178
2179	/*
2180	 * Overlay gets an aggressive default since video jitter is bad.
2181	 */
2182	cwm = 2;
2183
2184	/* Play safe and disable self-refresh before adjusting watermarks. */
2185	intel_set_memory_cxsr(dev_priv, false);
2186
2187	/* Calc sr entries for one plane configs */
2188	if (HAS_FW_BLC(dev_priv) && crtc) {
2189		/* self-refresh has much higher latency */
2190		static const int sr_latency_ns = 6000;
2191		const struct drm_display_mode *pipe_mode =
2192			&crtc->config->hw.pipe_mode;
2193		const struct drm_framebuffer *fb =
2194			crtc->base.primary->state->fb;
2195		int pixel_rate = crtc->config->pixel_rate;
2196		int htotal = pipe_mode->crtc_htotal;
2197		int width = drm_rect_width(&crtc->base.primary->state->src) >> 16;
2198		int cpp;
2199		int entries;
2200
2201		if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
2202			cpp = 4;
2203		else
2204			cpp = fb->format->cpp[0];
2205
2206		entries = intel_wm_method2(pixel_rate, htotal, width, cpp,
2207					   sr_latency_ns / 100);
2208		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
2209		drm_dbg_kms(&dev_priv->drm,
2210			    "self-refresh entries: %d\n", entries);
2211		srwm = wm_info->fifo_size - entries;
2212		if (srwm < 0)
2213			srwm = 1;
2214
2215		if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
2216			intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF,
2217				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
2218		else
2219			intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, srwm & 0x3f);
2220	}
2221
2222	drm_dbg_kms(&dev_priv->drm,
2223		    "Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
2224		     planea_wm, planeb_wm, cwm, srwm);
2225
2226	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
2227	fwater_hi = (cwm & 0x1f);
2228
2229	/* Set request length to 8 cachelines per fetch */
2230	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
2231	fwater_hi = fwater_hi | (1 << 8);
2232
2233	intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo);
2234	intel_uncore_write(&dev_priv->uncore, FW_BLC2, fwater_hi);
2235
2236	if (crtc)
2237		intel_set_memory_cxsr(dev_priv, true);
2238}
2239
2240static void i845_update_wm(struct drm_i915_private *dev_priv)
2241{
2242	struct intel_crtc *crtc;
2243	u32 fwater_lo;
2244	int planea_wm;
2245
2246	crtc = single_enabled_crtc(dev_priv);
2247	if (crtc == NULL)
2248		return;
2249
2250	planea_wm = intel_calculate_wm(dev_priv, crtc->config->pixel_rate,
2251				       &i845_wm_info,
2252				       i845_get_fifo_size(dev_priv, PLANE_A),
2253				       4, pessimal_latency_ns);
2254	fwater_lo = intel_uncore_read(&dev_priv->uncore, FW_BLC) & ~0xfff;
2255	fwater_lo |= (3<<8) | planea_wm;
2256
2257	drm_dbg_kms(&dev_priv->drm,
2258		    "Setting FIFO watermarks - A: %d\n", planea_wm);
2259
2260	intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo);
2261}
2262
2263/* latency must be in 0.1us units. */
2264static unsigned int ilk_wm_method1(unsigned int pixel_rate,
2265				   unsigned int cpp,
2266				   unsigned int latency)
2267{
2268	unsigned int ret;
2269
2270	ret = intel_wm_method1(pixel_rate, cpp, latency);
2271	ret = DIV_ROUND_UP(ret, 64) + 2;
2272
2273	return ret;
2274}
2275
2276/* latency must be in 0.1us units. */
2277static unsigned int ilk_wm_method2(unsigned int pixel_rate,
2278				   unsigned int htotal,
2279				   unsigned int width,
2280				   unsigned int cpp,
2281				   unsigned int latency)
2282{
2283	unsigned int ret;
2284
2285	ret = intel_wm_method2(pixel_rate, htotal,
2286			       width, cpp, latency);
2287	ret = DIV_ROUND_UP(ret, 64) + 2;
2288
2289	return ret;
2290}
2291
2292static u32 ilk_wm_fbc(u32 pri_val, u32 horiz_pixels, u8 cpp)
2293{
2294	/*
2295	 * Neither of these should be possible since this function shouldn't be
2296	 * called if the CRTC is off or the plane is invisible.  But let's be
2297	 * extra paranoid to avoid a potential divide-by-zero if we screw up
2298	 * elsewhere in the driver.
2299	 */
2300	if (WARN_ON(!cpp))
2301		return 0;
2302	if (WARN_ON(!horiz_pixels))
2303		return 0;
2304
2305	return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2;
2306}
2307
2308struct ilk_wm_maximums {
2309	u16 pri;
2310	u16 spr;
2311	u16 cur;
2312	u16 fbc;
2313};
2314
2315/*
2316 * For both WM_PIPE and WM_LP.
2317 * mem_value must be in 0.1us units.
2318 */
2319static u32 ilk_compute_pri_wm(const struct intel_crtc_state *crtc_state,
2320			      const struct intel_plane_state *plane_state,
2321			      u32 mem_value, bool is_lp)
2322{
2323	u32 method1, method2;
2324	int cpp;
2325
2326	if (mem_value == 0)
2327		return U32_MAX;
2328
2329	if (!intel_wm_plane_visible(crtc_state, plane_state))
2330		return 0;
2331
2332	cpp = plane_state->hw.fb->format->cpp[0];
2333
2334	method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value);
2335
2336	if (!is_lp)
2337		return method1;
2338
2339	method2 = ilk_wm_method2(crtc_state->pixel_rate,
2340				 crtc_state->hw.pipe_mode.crtc_htotal,
2341				 drm_rect_width(&plane_state->uapi.src) >> 16,
2342				 cpp, mem_value);
2343
2344	return min(method1, method2);
2345}
2346
2347/*
2348 * For both WM_PIPE and WM_LP.
2349 * mem_value must be in 0.1us units.
2350 */
2351static u32 ilk_compute_spr_wm(const struct intel_crtc_state *crtc_state,
2352			      const struct intel_plane_state *plane_state,
2353			      u32 mem_value)
2354{
2355	u32 method1, method2;
2356	int cpp;
2357
2358	if (mem_value == 0)
2359		return U32_MAX;
2360
2361	if (!intel_wm_plane_visible(crtc_state, plane_state))
2362		return 0;
2363
2364	cpp = plane_state->hw.fb->format->cpp[0];
2365
2366	method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value);
2367	method2 = ilk_wm_method2(crtc_state->pixel_rate,
2368				 crtc_state->hw.pipe_mode.crtc_htotal,
2369				 drm_rect_width(&plane_state->uapi.src) >> 16,
2370				 cpp, mem_value);
2371	return min(method1, method2);
2372}
2373
2374/*
2375 * For both WM_PIPE and WM_LP.
2376 * mem_value must be in 0.1us units.
2377 */
2378static u32 ilk_compute_cur_wm(const struct intel_crtc_state *crtc_state,
2379			      const struct intel_plane_state *plane_state,
2380			      u32 mem_value)
2381{
2382	int cpp;
2383
2384	if (mem_value == 0)
2385		return U32_MAX;
2386
2387	if (!intel_wm_plane_visible(crtc_state, plane_state))
2388		return 0;
2389
2390	cpp = plane_state->hw.fb->format->cpp[0];
2391
2392	return ilk_wm_method2(crtc_state->pixel_rate,
2393			      crtc_state->hw.pipe_mode.crtc_htotal,
2394			      drm_rect_width(&plane_state->uapi.src) >> 16,
2395			      cpp, mem_value);
2396}
2397
2398/* Only for WM_LP. */
2399static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
2400			      const struct intel_plane_state *plane_state,
2401			      u32 pri_val)
2402{
2403	int cpp;
2404
2405	if (!intel_wm_plane_visible(crtc_state, plane_state))
2406		return 0;
2407
2408	cpp = plane_state->hw.fb->format->cpp[0];
2409
2410	return ilk_wm_fbc(pri_val, drm_rect_width(&plane_state->uapi.src) >> 16,
2411			  cpp);
2412}
2413
2414static unsigned int
2415ilk_display_fifo_size(const struct drm_i915_private *dev_priv)
2416{
2417	if (DISPLAY_VER(dev_priv) >= 8)
2418		return 3072;
2419	else if (DISPLAY_VER(dev_priv) >= 7)
2420		return 768;
2421	else
2422		return 512;
2423}
2424
2425static unsigned int
2426ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv,
2427		     int level, bool is_sprite)
2428{
2429	if (DISPLAY_VER(dev_priv) >= 8)
2430		/* BDW primary/sprite plane watermarks */
2431		return level == 0 ? 255 : 2047;
2432	else if (DISPLAY_VER(dev_priv) >= 7)
2433		/* IVB/HSW primary/sprite plane watermarks */
2434		return level == 0 ? 127 : 1023;
2435	else if (!is_sprite)
2436		/* ILK/SNB primary plane watermarks */
2437		return level == 0 ? 127 : 511;
2438	else
2439		/* ILK/SNB sprite plane watermarks */
2440		return level == 0 ? 63 : 255;
2441}
2442
2443static unsigned int
2444ilk_cursor_wm_reg_max(const struct drm_i915_private *dev_priv, int level)
2445{
2446	if (DISPLAY_VER(dev_priv) >= 7)
2447		return level == 0 ? 63 : 255;
2448	else
2449		return level == 0 ? 31 : 63;
2450}
2451
2452static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv)
2453{
2454	if (DISPLAY_VER(dev_priv) >= 8)
2455		return 31;
2456	else
2457		return 15;
2458}
2459
2460/* Calculate the maximum primary/sprite plane watermark */
2461static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv,
2462				     int level,
2463				     const struct intel_wm_config *config,
2464				     enum intel_ddb_partitioning ddb_partitioning,
2465				     bool is_sprite)
2466{
2467	unsigned int fifo_size = ilk_display_fifo_size(dev_priv);
2468
2469	/* if sprites aren't enabled, sprites get nothing */
2470	if (is_sprite && !config->sprites_enabled)
2471		return 0;
2472
2473	/* HSW allows LP1+ watermarks even with multiple pipes */
2474	if (level == 0 || config->num_pipes_active > 1) {
2475		fifo_size /= INTEL_NUM_PIPES(dev_priv);
2476
2477		/*
2478		 * For some reason the non self refresh
2479		 * FIFO size is only half of the self
2480		 * refresh FIFO size on ILK/SNB.
2481		 */
2482		if (DISPLAY_VER(dev_priv) < 7)
2483			fifo_size /= 2;
2484	}
2485
2486	if (config->sprites_enabled) {
2487		/* level 0 is always calculated with 1:1 split */
2488		if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
2489			if (is_sprite)
2490				fifo_size *= 5;
2491			fifo_size /= 6;
2492		} else {
2493			fifo_size /= 2;
2494		}
2495	}
2496
2497	/* clamp to max that the registers can hold */
2498	return min(fifo_size, ilk_plane_wm_reg_max(dev_priv, level, is_sprite));
2499}
2500
2501/* Calculate the maximum cursor plane watermark */
2502static unsigned int ilk_cursor_wm_max(const struct drm_i915_private *dev_priv,
2503				      int level,
2504				      const struct intel_wm_config *config)
2505{
2506	/* HSW LP1+ watermarks w/ multiple pipes */
2507	if (level > 0 && config->num_pipes_active > 1)
2508		return 64;
2509
2510	/* otherwise just report max that registers can hold */
2511	return ilk_cursor_wm_reg_max(dev_priv, level);
2512}
2513
2514static void ilk_compute_wm_maximums(const struct drm_i915_private *dev_priv,
2515				    int level,
2516				    const struct intel_wm_config *config,
2517				    enum intel_ddb_partitioning ddb_partitioning,
2518				    struct ilk_wm_maximums *max)
2519{
2520	max->pri = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, false);
2521	max->spr = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, true);
2522	max->cur = ilk_cursor_wm_max(dev_priv, level, config);
2523	max->fbc = ilk_fbc_wm_reg_max(dev_priv);
2524}
2525
2526static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv,
2527					int level,
2528					struct ilk_wm_maximums *max)
2529{
2530	max->pri = ilk_plane_wm_reg_max(dev_priv, level, false);
2531	max->spr = ilk_plane_wm_reg_max(dev_priv, level, true);
2532	max->cur = ilk_cursor_wm_reg_max(dev_priv, level);
2533	max->fbc = ilk_fbc_wm_reg_max(dev_priv);
2534}
2535
2536static bool ilk_validate_wm_level(struct drm_i915_private *i915,
2537				  int level,
2538				  const struct ilk_wm_maximums *max,
2539				  struct intel_wm_level *result)
2540{
2541	bool ret;
2542
2543	/* already determined to be invalid? */
2544	if (!result->enable)
2545		return false;
2546
2547	result->enable = result->pri_val <= max->pri &&
2548			 result->spr_val <= max->spr &&
2549			 result->cur_val <= max->cur;
2550
2551	ret = result->enable;
2552
2553	/*
2554	 * HACK until we can pre-compute everything,
2555	 * and thus fail gracefully if LP0 watermarks
2556	 * are exceeded...
2557	 */
2558	if (level == 0 && !result->enable) {
2559		if (result->pri_val > max->pri)
2560			drm_dbg_kms(&i915->drm,
2561				    "Primary WM%d too large %u (max %u)\n",
2562				    level, result->pri_val, max->pri);
2563		if (result->spr_val > max->spr)
2564			drm_dbg_kms(&i915->drm,
2565				    "Sprite WM%d too large %u (max %u)\n",
2566				    level, result->spr_val, max->spr);
2567		if (result->cur_val > max->cur)
2568			drm_dbg_kms(&i915->drm,
2569				    "Cursor WM%d too large %u (max %u)\n",
2570				    level, result->cur_val, max->cur);
2571
2572		result->pri_val = min_t(u32, result->pri_val, max->pri);
2573		result->spr_val = min_t(u32, result->spr_val, max->spr);
2574		result->cur_val = min_t(u32, result->cur_val, max->cur);
2575		result->enable = true;
2576	}
2577
2578	return ret;
2579}
2580
2581static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
2582				 const struct intel_crtc *crtc,
2583				 int level,
2584				 struct intel_crtc_state *crtc_state,
2585				 const struct intel_plane_state *pristate,
2586				 const struct intel_plane_state *sprstate,
2587				 const struct intel_plane_state *curstate,
2588				 struct intel_wm_level *result)
2589{
2590	u16 pri_latency = dev_priv->display.wm.pri_latency[level];
2591	u16 spr_latency = dev_priv->display.wm.spr_latency[level];
2592	u16 cur_latency = dev_priv->display.wm.cur_latency[level];
2593
2594	/* WM1+ latency values stored in 0.5us units */
2595	if (level > 0) {
2596		pri_latency *= 5;
2597		spr_latency *= 5;
2598		cur_latency *= 5;
2599	}
2600
2601	if (pristate) {
2602		result->pri_val = ilk_compute_pri_wm(crtc_state, pristate,
2603						     pri_latency, level);
2604		result->fbc_val = ilk_compute_fbc_wm(crtc_state, pristate, result->pri_val);
2605	}
2606
2607	if (sprstate)
2608		result->spr_val = ilk_compute_spr_wm(crtc_state, sprstate, spr_latency);
2609
2610	if (curstate)
2611		result->cur_val = ilk_compute_cur_wm(crtc_state, curstate, cur_latency);
2612
2613	result->enable = true;
2614}
2615
2616static void hsw_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
2617{
2618	u64 sskpd;
2619
2620	i915->display.wm.num_levels = 5;
2621
2622	sskpd = intel_uncore_read64(&i915->uncore, MCH_SSKPD);
2623
2624	wm[0] = REG_FIELD_GET64(SSKPD_NEW_WM0_MASK_HSW, sskpd);
2625	if (wm[0] == 0)
2626		wm[0] = REG_FIELD_GET64(SSKPD_OLD_WM0_MASK_HSW, sskpd);
2627	wm[1] = REG_FIELD_GET64(SSKPD_WM1_MASK_HSW, sskpd);
2628	wm[2] = REG_FIELD_GET64(SSKPD_WM2_MASK_HSW, sskpd);
2629	wm[3] = REG_FIELD_GET64(SSKPD_WM3_MASK_HSW, sskpd);
2630	wm[4] = REG_FIELD_GET64(SSKPD_WM4_MASK_HSW, sskpd);
2631}
2632
2633static void snb_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
2634{
2635	u32 sskpd;
2636
2637	i915->display.wm.num_levels = 4;
2638
2639	sskpd = intel_uncore_read(&i915->uncore, MCH_SSKPD);
2640
2641	wm[0] = REG_FIELD_GET(SSKPD_WM0_MASK_SNB, sskpd);
2642	wm[1] = REG_FIELD_GET(SSKPD_WM1_MASK_SNB, sskpd);
2643	wm[2] = REG_FIELD_GET(SSKPD_WM2_MASK_SNB, sskpd);
2644	wm[3] = REG_FIELD_GET(SSKPD_WM3_MASK_SNB, sskpd);
2645}
2646
2647static void ilk_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
2648{
2649	u32 mltr;
2650
2651	i915->display.wm.num_levels = 3;
2652
2653	mltr = intel_uncore_read(&i915->uncore, MLTR_ILK);
2654
2655	/* ILK primary LP0 latency is 700 ns */
2656	wm[0] = 7;
2657	wm[1] = REG_FIELD_GET(MLTR_WM1_MASK, mltr);
2658	wm[2] = REG_FIELD_GET(MLTR_WM2_MASK, mltr);
2659}
2660
2661static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv,
2662				       u16 wm[5])
2663{
2664	/* ILK sprite LP0 latency is 1300 ns */
2665	if (DISPLAY_VER(dev_priv) == 5)
2666		wm[0] = 13;
2667}
2668
2669static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
2670				       u16 wm[5])
2671{
2672	/* ILK cursor LP0 latency is 1300 ns */
2673	if (DISPLAY_VER(dev_priv) == 5)
2674		wm[0] = 13;
2675}
2676
2677static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
2678				    u16 wm[5], u16 min)
2679{
2680	int level;
2681
2682	if (wm[0] >= min)
2683		return false;
2684
2685	wm[0] = max(wm[0], min);
2686	for (level = 1; level < dev_priv->display.wm.num_levels; level++)
2687		wm[level] = max_t(u16, wm[level], DIV_ROUND_UP(min, 5));
2688
2689	return true;
2690}
2691
2692static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
2693{
2694	bool changed;
2695
2696	/*
2697	 * The BIOS provided WM memory latency values are often
2698	 * inadequate for high resolution displays. Adjust them.
2699	 */
2700	changed = ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.pri_latency, 12);
2701	changed |= ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.spr_latency, 12);
2702	changed |= ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.cur_latency, 12);
2703
2704	if (!changed)
2705		return;
2706
2707	drm_dbg_kms(&dev_priv->drm,
2708		    "WM latency values increased to avoid potential underruns\n");
2709	intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
2710	intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
2711	intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
2712}
2713
2714static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
2715{
2716	/*
2717	 * On some SNB machines (Thinkpad X220 Tablet at least)
2718	 * LP3 usage can cause vblank interrupts to be lost.
2719	 * The DEIIR bit will go high but it looks like the CPU
2720	 * never gets interrupted.
2721	 *
2722	 * It's not clear whether other interrupt source could
2723	 * be affected or if this is somehow limited to vblank
2724	 * interrupts only. To play it safe we disable LP3
2725	 * watermarks entirely.
2726	 */
2727	if (dev_priv->display.wm.pri_latency[3] == 0 &&
2728	    dev_priv->display.wm.spr_latency[3] == 0 &&
2729	    dev_priv->display.wm.cur_latency[3] == 0)
2730		return;
2731
2732	dev_priv->display.wm.pri_latency[3] = 0;
2733	dev_priv->display.wm.spr_latency[3] = 0;
2734	dev_priv->display.wm.cur_latency[3] = 0;
2735
2736	drm_dbg_kms(&dev_priv->drm,
2737		    "LP3 watermarks disabled due to potential for lost interrupts\n");
2738	intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
2739	intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
2740	intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
2741}
2742
2743static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
2744{
2745	if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
2746		hsw_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);
2747	else if (DISPLAY_VER(dev_priv) >= 6)
2748		snb_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);
2749	else
2750		ilk_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);
2751
2752	memcpy(dev_priv->display.wm.spr_latency, dev_priv->display.wm.pri_latency,
2753	       sizeof(dev_priv->display.wm.pri_latency));
2754	memcpy(dev_priv->display.wm.cur_latency, dev_priv->display.wm.pri_latency,
2755	       sizeof(dev_priv->display.wm.pri_latency));
2756
2757	intel_fixup_spr_wm_latency(dev_priv, dev_priv->display.wm.spr_latency);
2758	intel_fixup_cur_wm_latency(dev_priv, dev_priv->display.wm.cur_latency);
2759
2760	intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
2761	intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
2762	intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
2763
2764	if (DISPLAY_VER(dev_priv) == 6) {
2765		snb_wm_latency_quirk(dev_priv);
2766		snb_wm_lp3_irq_quirk(dev_priv);
2767	}
2768}
2769
2770static bool ilk_validate_pipe_wm(struct drm_i915_private *dev_priv,
2771				 struct intel_pipe_wm *pipe_wm)
2772{
2773	/* LP0 watermark maximums depend on this pipe alone */
2774	const struct intel_wm_config config = {
2775		.num_pipes_active = 1,
2776		.sprites_enabled = pipe_wm->sprites_enabled,
2777		.sprites_scaled = pipe_wm->sprites_scaled,
2778	};
2779	struct ilk_wm_maximums max;
2780
2781	/* LP0 watermarks always use 1/2 DDB partitioning */
2782	ilk_compute_wm_maximums(dev_priv, 0, &config, INTEL_DDB_PART_1_2, &max);
2783
2784	/* At least LP0 must be valid */
2785	if (!ilk_validate_wm_level(dev_priv, 0, &max, &pipe_wm->wm[0])) {
2786		drm_dbg_kms(&dev_priv->drm, "LP0 watermark invalid\n");
2787		return false;
2788	}
2789
2790	return true;
2791}
2792
2793/* Compute new watermarks for the pipe */
2794static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
2795			       struct intel_crtc *crtc)
2796{
2797	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2798	struct intel_crtc_state *crtc_state =
2799		intel_atomic_get_new_crtc_state(state, crtc);
2800	struct intel_pipe_wm *pipe_wm;
2801	struct intel_plane *plane;
2802	const struct intel_plane_state *plane_state;
2803	const struct intel_plane_state *pristate = NULL;
2804	const struct intel_plane_state *sprstate = NULL;
2805	const struct intel_plane_state *curstate = NULL;
2806	struct ilk_wm_maximums max;
2807	int level, usable_level;
2808
2809	pipe_wm = &crtc_state->wm.ilk.optimal;
2810
2811	intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
2812		if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
2813			pristate = plane_state;
2814		else if (plane->base.type == DRM_PLANE_TYPE_OVERLAY)
2815			sprstate = plane_state;
2816		else if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
2817			curstate = plane_state;
2818	}
2819
2820	pipe_wm->pipe_enabled = crtc_state->hw.active;
2821	pipe_wm->sprites_enabled = crtc_state->active_planes & BIT(PLANE_SPRITE0);
2822	pipe_wm->sprites_scaled = crtc_state->scaled_planes & BIT(PLANE_SPRITE0);
2823
2824	usable_level = dev_priv->display.wm.num_levels - 1;
2825
2826	/* ILK/SNB: LP2+ watermarks only w/o sprites */
2827	if (DISPLAY_VER(dev_priv) < 7 && pipe_wm->sprites_enabled)
2828		usable_level = 1;
2829
2830	/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
2831	if (pipe_wm->sprites_scaled)
2832		usable_level = 0;
2833
2834	memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
2835	ilk_compute_wm_level(dev_priv, crtc, 0, crtc_state,
2836			     pristate, sprstate, curstate, &pipe_wm->wm[0]);
2837
2838	if (!ilk_validate_pipe_wm(dev_priv, pipe_wm))
2839		return -EINVAL;
2840
2841	ilk_compute_wm_reg_maximums(dev_priv, 1, &max);
2842
2843	for (level = 1; level <= usable_level; level++) {
2844		struct intel_wm_level *wm = &pipe_wm->wm[level];
2845
2846		ilk_compute_wm_level(dev_priv, crtc, level, crtc_state,
2847				     pristate, sprstate, curstate, wm);
2848
2849		/*
2850		 * Disable any watermark level that exceeds the
2851		 * register maximums since such watermarks are
2852		 * always invalid.
2853		 */
2854		if (!ilk_validate_wm_level(dev_priv, level, &max, wm)) {
2855			memset(wm, 0, sizeof(*wm));
2856			break;
2857		}
2858	}
2859
2860	return 0;
2861}
2862
2863/*
2864 * Build a set of 'intermediate' watermark values that satisfy both the old
2865 * state and the new state.  These can be programmed to the hardware
2866 * immediately.
2867 */
2868static int ilk_compute_intermediate_wm(struct intel_atomic_state *state,
2869				       struct intel_crtc *crtc)
2870{
2871	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2872	struct intel_crtc_state *new_crtc_state =
2873		intel_atomic_get_new_crtc_state(state, crtc);
2874	const struct intel_crtc_state *old_crtc_state =
2875		intel_atomic_get_old_crtc_state(state, crtc);
2876	struct intel_pipe_wm *a = &new_crtc_state->wm.ilk.intermediate;
2877	const struct intel_pipe_wm *b = &old_crtc_state->wm.ilk.optimal;
2878	int level;
2879
2880	/*
2881	 * Start with the final, target watermarks, then combine with the
2882	 * currently active watermarks to get values that are safe both before
2883	 * and after the vblank.
2884	 */
2885	*a = new_crtc_state->wm.ilk.optimal;
2886	if (!new_crtc_state->hw.active ||
2887	    intel_crtc_needs_modeset(new_crtc_state) ||
2888	    state->skip_intermediate_wm)
2889		return 0;
2890
2891	a->pipe_enabled |= b->pipe_enabled;
2892	a->sprites_enabled |= b->sprites_enabled;
2893	a->sprites_scaled |= b->sprites_scaled;
2894
2895	for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
2896		struct intel_wm_level *a_wm = &a->wm[level];
2897		const struct intel_wm_level *b_wm = &b->wm[level];
2898
2899		a_wm->enable &= b_wm->enable;
2900		a_wm->pri_val = max(a_wm->pri_val, b_wm->pri_val);
2901		a_wm->spr_val = max(a_wm->spr_val, b_wm->spr_val);
2902		a_wm->cur_val = max(a_wm->cur_val, b_wm->cur_val);
2903		a_wm->fbc_val = max(a_wm->fbc_val, b_wm->fbc_val);
2904	}
2905
2906	/*
2907	 * We need to make sure that these merged watermark values are
2908	 * actually a valid configuration themselves.  If they're not,
2909	 * there's no safe way to transition from the old state to
2910	 * the new state, so we need to fail the atomic transaction.
2911	 */
2912	if (!ilk_validate_pipe_wm(dev_priv, a))
2913		return -EINVAL;
2914
2915	/*
2916	 * If our intermediate WM are identical to the final WM, then we can
2917	 * omit the post-vblank programming; only update if it's different.
2918	 */
2919	if (memcmp(a, &new_crtc_state->wm.ilk.optimal, sizeof(*a)) != 0)
2920		new_crtc_state->wm.need_postvbl_update = true;
2921
2922	return 0;
2923}
2924
2925/*
2926 * Merge the watermarks from all active pipes for a specific level.
2927 */
2928static void ilk_merge_wm_level(struct drm_i915_private *dev_priv,
2929			       int level,
2930			       struct intel_wm_level *ret_wm)
2931{
2932	const struct intel_crtc *crtc;
2933
2934	ret_wm->enable = true;
2935
2936	for_each_intel_crtc(&dev_priv->drm, crtc) {
2937		const struct intel_pipe_wm *active = &crtc->wm.active.ilk;
2938		const struct intel_wm_level *wm = &active->wm[level];
2939
2940		if (!active->pipe_enabled)
2941			continue;
2942
2943		/*
2944		 * The watermark values may have been used in the past,
2945		 * so we must maintain them in the registers for some
2946		 * time even if the level is now disabled.
2947		 */
2948		if (!wm->enable)
2949			ret_wm->enable = false;
2950
2951		ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
2952		ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
2953		ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
2954		ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
2955	}
2956}
2957
2958/*
2959 * Merge all low power watermarks for all active pipes.
2960 */
2961static void ilk_wm_merge(struct drm_i915_private *dev_priv,
2962			 const struct intel_wm_config *config,
2963			 const struct ilk_wm_maximums *max,
2964			 struct intel_pipe_wm *merged)
2965{
2966	int level, num_levels = dev_priv->display.wm.num_levels;
2967	int last_enabled_level = num_levels - 1;
2968
2969	/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
2970	if ((DISPLAY_VER(dev_priv) < 7 || IS_IVYBRIDGE(dev_priv)) &&
2971	    config->num_pipes_active > 1)
2972		last_enabled_level = 0;
2973
2974	/* ILK: FBC WM must be disabled always */
2975	merged->fbc_wm_enabled = DISPLAY_VER(dev_priv) >= 6;
2976
2977	/* merge each WM1+ level */
2978	for (level = 1; level < num_levels; level++) {
2979		struct intel_wm_level *wm = &merged->wm[level];
2980
2981		ilk_merge_wm_level(dev_priv, level, wm);
2982
2983		if (level > last_enabled_level)
2984			wm->enable = false;
2985		else if (!ilk_validate_wm_level(dev_priv, level, max, wm))
2986			/* make sure all following levels get disabled */
2987			last_enabled_level = level - 1;
2988
2989		/*
2990		 * The spec says it is preferred to disable
2991		 * FBC WMs instead of disabling a WM level.
2992		 */
2993		if (wm->fbc_val > max->fbc) {
2994			if (wm->enable)
2995				merged->fbc_wm_enabled = false;
2996			wm->fbc_val = 0;
2997		}
2998	}
2999
3000	/* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
3001	if (DISPLAY_VER(dev_priv) == 5 && HAS_FBC(dev_priv) &&
3002	    dev_priv->display.params.enable_fbc && !merged->fbc_wm_enabled) {
3003		for (level = 2; level < num_levels; level++) {
3004			struct intel_wm_level *wm = &merged->wm[level];
3005
3006			wm->enable = false;
3007		}
3008	}
3009}
3010
3011static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
3012{
3013	/* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
3014	return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
3015}
3016
3017/* The value we need to program into the WM_LPx latency field */
3018static unsigned int ilk_wm_lp_latency(struct drm_i915_private *dev_priv,
3019				      int level)
3020{
3021	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
3022		return 2 * level;
3023	else
3024		return dev_priv->display.wm.pri_latency[level];
3025}
3026
3027static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
3028				   const struct intel_pipe_wm *merged,
3029				   enum intel_ddb_partitioning partitioning,
3030				   struct ilk_wm_values *results)
3031{
3032	struct intel_crtc *crtc;
3033	int level, wm_lp;
3034
3035	results->enable_fbc_wm = merged->fbc_wm_enabled;
3036	results->partitioning = partitioning;
3037
3038	/* LP1+ register values */
3039	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
3040		const struct intel_wm_level *r;
3041
3042		level = ilk_wm_lp_to_level(wm_lp, merged);
3043
3044		r = &merged->wm[level];
3045
3046		/*
3047		 * Maintain the watermark values even if the level is
3048		 * disabled. Doing otherwise could cause underruns.
3049		 */
3050		results->wm_lp[wm_lp - 1] =
3051			WM_LP_LATENCY(ilk_wm_lp_latency(dev_priv, level)) |
3052			WM_LP_PRIMARY(r->pri_val) |
3053			WM_LP_CURSOR(r->cur_val);
3054
3055		if (r->enable)
3056			results->wm_lp[wm_lp - 1] |= WM_LP_ENABLE;
3057
3058		if (DISPLAY_VER(dev_priv) >= 8)
3059			results->wm_lp[wm_lp - 1] |= WM_LP_FBC_BDW(r->fbc_val);
3060		else
3061			results->wm_lp[wm_lp - 1] |= WM_LP_FBC_ILK(r->fbc_val);
3062
3063		results->wm_lp_spr[wm_lp - 1] = WM_LP_SPRITE(r->spr_val);
3064
3065		/*
3066		 * Always set WM_LP_SPRITE_EN when spr_val != 0, even if the
3067		 * level is disabled. Doing otherwise could cause underruns.
3068		 */
3069		if (DISPLAY_VER(dev_priv) < 7 && r->spr_val) {
3070			drm_WARN_ON(&dev_priv->drm, wm_lp != 1);
3071			results->wm_lp_spr[wm_lp - 1] |= WM_LP_SPRITE_ENABLE;
3072		}
3073	}
3074
3075	/* LP0 register values */
3076	for_each_intel_crtc(&dev_priv->drm, crtc) {
3077		enum pipe pipe = crtc->pipe;
3078		const struct intel_pipe_wm *pipe_wm = &crtc->wm.active.ilk;
3079		const struct intel_wm_level *r = &pipe_wm->wm[0];
3080
3081		if (drm_WARN_ON(&dev_priv->drm, !r->enable))
3082			continue;
3083
3084		results->wm_pipe[pipe] =
3085			WM0_PIPE_PRIMARY(r->pri_val) |
3086			WM0_PIPE_SPRITE(r->spr_val) |
3087			WM0_PIPE_CURSOR(r->cur_val);
3088	}
3089}
3090
3091/*
3092 * Find the result with the highest level enabled. Check for enable_fbc_wm in
3093 * case both are at the same level. Prefer r1 in case they're the same.
3094 */
3095static struct intel_pipe_wm *
3096ilk_find_best_result(struct drm_i915_private *dev_priv,
3097		     struct intel_pipe_wm *r1,
3098		     struct intel_pipe_wm *r2)
3099{
3100	int level, level1 = 0, level2 = 0;
3101
3102	for (level = 1; level < dev_priv->display.wm.num_levels; level++) {
3103		if (r1->wm[level].enable)
3104			level1 = level;
3105		if (r2->wm[level].enable)
3106			level2 = level;
3107	}
3108
3109	if (level1 == level2) {
3110		if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
3111			return r2;
3112		else
3113			return r1;
3114	} else if (level1 > level2) {
3115		return r1;
3116	} else {
3117		return r2;
3118	}
3119}
3120
3121/* dirty bits used to track which watermarks need changes */
3122#define WM_DIRTY_PIPE(pipe) (1 << (pipe))
3123#define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
3124#define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
3125#define WM_DIRTY_FBC (1 << 24)
3126#define WM_DIRTY_DDB (1 << 25)
3127
3128static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
3129					 const struct ilk_wm_values *old,
3130					 const struct ilk_wm_values *new)
3131{
3132	unsigned int dirty = 0;
3133	enum pipe pipe;
3134	int wm_lp;
3135
3136	for_each_pipe(dev_priv, pipe) {
3137		if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
3138			dirty |= WM_DIRTY_PIPE(pipe);
3139			/* Must disable LP1+ watermarks too */
3140			dirty |= WM_DIRTY_LP_ALL;
3141		}
3142	}
3143
3144	if (old->enable_fbc_wm != new->enable_fbc_wm) {
3145		dirty |= WM_DIRTY_FBC;
3146		/* Must disable LP1+ watermarks too */
3147		dirty |= WM_DIRTY_LP_ALL;
3148	}
3149
3150	if (old->partitioning != new->partitioning) {
3151		dirty |= WM_DIRTY_DDB;
3152		/* Must disable LP1+ watermarks too */
3153		dirty |= WM_DIRTY_LP_ALL;
3154	}
3155
3156	/* LP1+ watermarks already deemed dirty, no need to continue */
3157	if (dirty & WM_DIRTY_LP_ALL)
3158		return dirty;
3159
3160	/* Find the lowest numbered LP1+ watermark in need of an update... */
3161	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
3162		if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
3163		    old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
3164			break;
3165	}
3166
3167	/* ...and mark it and all higher numbered LP1+ watermarks as dirty */
3168	for (; wm_lp <= 3; wm_lp++)
3169		dirty |= WM_DIRTY_LP(wm_lp);
3170
3171	return dirty;
3172}
3173
3174static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
3175			       unsigned int dirty)
3176{
3177	struct ilk_wm_values *previous = &dev_priv->display.wm.hw;
3178	bool changed = false;
3179
3180	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM_LP_ENABLE) {
3181		previous->wm_lp[2] &= ~WM_LP_ENABLE;
3182		intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, previous->wm_lp[2]);
3183		changed = true;
3184	}
3185	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM_LP_ENABLE) {
3186		previous->wm_lp[1] &= ~WM_LP_ENABLE;
3187		intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, previous->wm_lp[1]);
3188		changed = true;
3189	}
3190	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM_LP_ENABLE) {
3191		previous->wm_lp[0] &= ~WM_LP_ENABLE;
3192		intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, previous->wm_lp[0]);
3193		changed = true;
3194	}
3195
3196	/*
3197	 * Don't touch WM_LP_SPRITE_ENABLE here.
3198	 * Doing so could cause underruns.
3199	 */
3200
3201	return changed;
3202}
3203
3204/*
3205 * The spec says we shouldn't write when we don't need, because every write
3206 * causes WMs to be re-evaluated, expending some power.
3207 */
3208static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
3209				struct ilk_wm_values *results)
3210{
3211	struct ilk_wm_values *previous = &dev_priv->display.wm.hw;
3212	unsigned int dirty;
3213
3214	dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
3215	if (!dirty)
3216		return;
3217
3218	_ilk_disable_lp_wm(dev_priv, dirty);
3219
3220	if (dirty & WM_DIRTY_PIPE(PIPE_A))
3221		intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_A), results->wm_pipe[0]);
3222	if (dirty & WM_DIRTY_PIPE(PIPE_B))
3223		intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_B), results->wm_pipe[1]);
3224	if (dirty & WM_DIRTY_PIPE(PIPE_C))
3225		intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_C), results->wm_pipe[2]);
3226
3227	if (dirty & WM_DIRTY_DDB) {
3228		if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
3229			intel_uncore_rmw(&dev_priv->uncore, WM_MISC, WM_MISC_DATA_PARTITION_5_6,
3230					 results->partitioning == INTEL_DDB_PART_1_2 ? 0 :
3231					 WM_MISC_DATA_PARTITION_5_6);
3232		else
3233			intel_uncore_rmw(&dev_priv->uncore, DISP_ARB_CTL2, DISP_DATA_PARTITION_5_6,
3234					 results->partitioning == INTEL_DDB_PART_1_2 ? 0 :
3235					 DISP_DATA_PARTITION_5_6);
3236	}
3237
3238	if (dirty & WM_DIRTY_FBC)
3239		intel_uncore_rmw(&dev_priv->uncore, DISP_ARB_CTL, DISP_FBC_WM_DIS,
3240				 results->enable_fbc_wm ? 0 : DISP_FBC_WM_DIS);
3241
3242	if (dirty & WM_DIRTY_LP(1) &&
3243	    previous->wm_lp_spr[0] != results->wm_lp_spr[0])
3244		intel_uncore_write(&dev_priv->uncore, WM1S_LP_ILK, results->wm_lp_spr[0]);
3245
3246	if (DISPLAY_VER(dev_priv) >= 7) {
3247		if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
3248			intel_uncore_write(&dev_priv->uncore, WM2S_LP_IVB, results->wm_lp_spr[1]);
3249		if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
3250			intel_uncore_write(&dev_priv->uncore, WM3S_LP_IVB, results->wm_lp_spr[2]);
3251	}
3252
3253	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
3254		intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, results->wm_lp[0]);
3255	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
3256		intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, results->wm_lp[1]);
3257	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
3258		intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, results->wm_lp[2]);
3259
3260	dev_priv->display.wm.hw = *results;
3261}
3262
3263bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv)
3264{
3265	return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
3266}
3267
3268static void ilk_compute_wm_config(struct drm_i915_private *dev_priv,
3269				  struct intel_wm_config *config)
3270{
3271	struct intel_crtc *crtc;
3272
3273	/* Compute the currently _active_ config */
3274	for_each_intel_crtc(&dev_priv->drm, crtc) {
3275		const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;
3276
3277		if (!wm->pipe_enabled)
3278			continue;
3279
3280		config->sprites_enabled |= wm->sprites_enabled;
3281		config->sprites_scaled |= wm->sprites_scaled;
3282		config->num_pipes_active++;
3283	}
3284}
3285
3286static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
3287{
3288	struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
3289	struct ilk_wm_maximums max;
3290	struct intel_wm_config config = {};
3291	struct ilk_wm_values results = {};
3292	enum intel_ddb_partitioning partitioning;
3293
3294	ilk_compute_wm_config(dev_priv, &config);
3295
3296	ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_1_2, &max);
3297	ilk_wm_merge(dev_priv, &config, &max, &lp_wm_1_2);
3298
3299	/* 5/6 split only in single pipe config on IVB+ */
3300	if (DISPLAY_VER(dev_priv) >= 7 &&
3301	    config.num_pipes_active == 1 && config.sprites_enabled) {
3302		ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_5_6, &max);
3303		ilk_wm_merge(dev_priv, &config, &max, &lp_wm_5_6);
3304
3305		best_lp_wm = ilk_find_best_result(dev_priv, &lp_wm_1_2, &lp_wm_5_6);
3306	} else {
3307		best_lp_wm = &lp_wm_1_2;
3308	}
3309
3310	partitioning = (best_lp_wm == &lp_wm_1_2) ?
3311		       INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
3312
3313	ilk_compute_wm_results(dev_priv, best_lp_wm, partitioning, &results);
3314
3315	ilk_write_wm_values(dev_priv, &results);
3316}
3317
3318static void ilk_initial_watermarks(struct intel_atomic_state *state,
3319				   struct intel_crtc *crtc)
3320{
3321	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3322	const struct intel_crtc_state *crtc_state =
3323		intel_atomic_get_new_crtc_state(state, crtc);
3324
3325	mutex_lock(&dev_priv->display.wm.wm_mutex);
3326	crtc->wm.active.ilk = crtc_state->wm.ilk.intermediate;
3327	ilk_program_watermarks(dev_priv);
3328	mutex_unlock(&dev_priv->display.wm.wm_mutex);
3329}
3330
3331static void ilk_optimize_watermarks(struct intel_atomic_state *state,
3332				    struct intel_crtc *crtc)
3333{
3334	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3335	const struct intel_crtc_state *crtc_state =
3336		intel_atomic_get_new_crtc_state(state, crtc);
3337
3338	if (!crtc_state->wm.need_postvbl_update)
3339		return;
3340
3341	mutex_lock(&dev_priv->display.wm.wm_mutex);
3342	crtc->wm.active.ilk = crtc_state->wm.ilk.optimal;
3343	ilk_program_watermarks(dev_priv);
3344	mutex_unlock(&dev_priv->display.wm.wm_mutex);
3345}
3346
3347static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
3348{
3349	struct drm_device *dev = crtc->base.dev;
3350	struct drm_i915_private *dev_priv = to_i915(dev);
3351	struct ilk_wm_values *hw = &dev_priv->display.wm.hw;
3352	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
3353	struct intel_pipe_wm *active = &crtc_state->wm.ilk.optimal;
3354	enum pipe pipe = crtc->pipe;
3355
3356	hw->wm_pipe[pipe] = intel_uncore_read(&dev_priv->uncore, WM0_PIPE_ILK(pipe));
3357
3358	memset(active, 0, sizeof(*active));
3359
3360	active->pipe_enabled = crtc->active;
3361
3362	if (active->pipe_enabled) {
3363		u32 tmp = hw->wm_pipe[pipe];
3364
3365		/*
3366		 * For active pipes LP0 watermark is marked as
3367		 * enabled, and LP1+ watermaks as disabled since
3368		 * we can't really reverse compute them in case
3369		 * multiple pipes are active.
3370		 */
3371		active->wm[0].enable = true;
3372		active->wm[0].pri_val = REG_FIELD_GET(WM0_PIPE_PRIMARY_MASK, tmp);
3373		active->wm[0].spr_val = REG_FIELD_GET(WM0_PIPE_SPRITE_MASK, tmp);
3374		active->wm[0].cur_val = REG_FIELD_GET(WM0_PIPE_CURSOR_MASK, tmp);
3375	} else {
3376		int level;
3377
3378		/*
3379		 * For inactive pipes, all watermark levels
3380		 * should be marked as enabled but zeroed,
3381		 * which is what we'd compute them to.
3382		 */
3383		for (level = 0; level < dev_priv->display.wm.num_levels; level++)
3384			active->wm[level].enable = true;
3385	}
3386
3387	crtc->wm.active.ilk = *active;
3388}
3389
3390static int ilk_sanitize_watermarks_add_affected(struct drm_atomic_state *state)
3391{
3392	struct drm_plane *plane;
3393	struct intel_crtc *crtc;
3394
3395	for_each_intel_crtc(state->dev, crtc) {
3396		struct intel_crtc_state *crtc_state;
3397
3398		crtc_state = intel_atomic_get_crtc_state(state, crtc);
3399		if (IS_ERR(crtc_state))
3400			return PTR_ERR(crtc_state);
3401
3402		if (crtc_state->hw.active) {
3403			/*
3404			 * Preserve the inherited flag to avoid
3405			 * taking the full modeset path.
3406			 */
3407			crtc_state->inherited = true;
3408		}
3409	}
3410
3411	drm_for_each_plane(plane, state->dev) {
3412		struct drm_plane_state *plane_state;
3413
3414		plane_state = drm_atomic_get_plane_state(state, plane);
3415		if (IS_ERR(plane_state))
3416			return PTR_ERR(plane_state);
3417	}
3418
3419	return 0;
3420}
3421
3422/*
3423 * Calculate what we think the watermarks should be for the state we've read
3424 * out of the hardware and then immediately program those watermarks so that
3425 * we ensure the hardware settings match our internal state.
3426 *
3427 * We can calculate what we think WM's should be by creating a duplicate of the
3428 * current state (which was constructed during hardware readout) and running it
3429 * through the atomic check code to calculate new watermark values in the
3430 * state object.
3431 */
3432void ilk_wm_sanitize(struct drm_i915_private *dev_priv)
3433{
3434	struct drm_atomic_state *state;
3435	struct intel_atomic_state *intel_state;
3436	struct intel_crtc *crtc;
3437	struct intel_crtc_state *crtc_state;
3438	struct drm_modeset_acquire_ctx ctx;
3439	int ret;
3440	int i;
3441
3442	/* Only supported on platforms that use atomic watermark design */
3443	if (!dev_priv->display.funcs.wm->optimize_watermarks)
3444		return;
3445
3446	if (drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) >= 9))
3447		return;
3448
3449	state = drm_atomic_state_alloc(&dev_priv->drm);
3450	if (drm_WARN_ON(&dev_priv->drm, !state))
3451		return;
3452
3453	intel_state = to_intel_atomic_state(state);
3454
3455	drm_modeset_acquire_init(&ctx, 0);
3456
3457	state->acquire_ctx = &ctx;
3458	to_intel_atomic_state(state)->internal = true;
3459
3460retry:
3461	/*
3462	 * Hardware readout is the only time we don't want to calculate
3463	 * intermediate watermarks (since we don't trust the current
3464	 * watermarks).
3465	 */
3466	if (!HAS_GMCH(dev_priv))
3467		intel_state->skip_intermediate_wm = true;
3468
3469	ret = ilk_sanitize_watermarks_add_affected(state);
3470	if (ret)
3471		goto fail;
3472
3473	ret = intel_atomic_check(&dev_priv->drm, state);
3474	if (ret)
3475		goto fail;
3476
3477	/* Write calculated watermark values back */
3478	for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
3479		crtc_state->wm.need_postvbl_update = true;
3480		intel_optimize_watermarks(intel_state, crtc);
3481
3482		to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
3483	}
3484
3485fail:
3486	if (ret == -EDEADLK) {
3487		drm_atomic_state_clear(state);
3488		drm_modeset_backoff(&ctx);
3489		goto retry;
3490	}
3491
3492	/*
3493	 * If we fail here, it means that the hardware appears to be
3494	 * programmed in a way that shouldn't be possible, given our
3495	 * understanding of watermark requirements.  This might mean a
3496	 * mistake in the hardware readout code or a mistake in the
3497	 * watermark calculations for a given platform.  Raise a WARN
3498	 * so that this is noticeable.
3499	 *
3500	 * If this actually happens, we'll have to just leave the
3501	 * BIOS-programmed watermarks untouched and hope for the best.
3502	 */
3503	drm_WARN(&dev_priv->drm, ret,
3504		 "Could not determine valid watermarks for inherited state\n");
3505
3506	drm_atomic_state_put(state);
3507
3508	drm_modeset_drop_locks(&ctx);
3509	drm_modeset_acquire_fini(&ctx);
3510}
3511
3512#define _FW_WM(value, plane) \
3513	(((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)
3514#define _FW_WM_VLV(value, plane) \
3515	(((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)
3516
3517static void g4x_read_wm_values(struct drm_i915_private *dev_priv,
3518			       struct g4x_wm_values *wm)
3519{
3520	u32 tmp;
3521
3522	tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1);
3523	wm->sr.plane = _FW_WM(tmp, SR);
3524	wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
3525	wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEB);
3526	wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEA);
3527
3528	tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2);
3529	wm->fbc_en = tmp & DSPFW_FBC_SR_EN;
3530	wm->sr.fbc = _FW_WM(tmp, FBC_SR);
3531	wm->hpll.fbc = _FW_WM(tmp, FBC_HPLL_SR);
3532	wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEB);
3533	wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
3534	wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEA);
3535
3536	tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3);
3537	wm->hpll_en = tmp & DSPFW_HPLL_SR_EN;
3538	wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
3539	wm->hpll.cursor = _FW_WM(tmp, HPLL_CURSOR);
3540	wm->hpll.plane = _FW_WM(tmp, HPLL_SR);
3541}
3542
3543static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
3544			       struct vlv_wm_values *wm)
3545{
3546	enum pipe pipe;
3547	u32 tmp;
3548
3549	for_each_pipe(dev_priv, pipe) {
3550		tmp = intel_uncore_read(&dev_priv->uncore, VLV_DDL(pipe));
3551
3552		wm->ddl[pipe].plane[PLANE_PRIMARY] =
3553			(tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
3554		wm->ddl[pipe].plane[PLANE_CURSOR] =
3555			(tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
3556		wm->ddl[pipe].plane[PLANE_SPRITE0] =
3557			(tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
3558		wm->ddl[pipe].plane[PLANE_SPRITE1] =
3559			(tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
3560	}
3561
3562	tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1);
3563	wm->sr.plane = _FW_WM(tmp, SR);
3564	wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
3565	wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEB);
3566	wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEA);
3567
3568	tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2);
3569	wm->pipe[PIPE_A].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEB);
3570	wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
3571	wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEA);
3572
3573	tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3);
3574	wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
3575
3576	if (IS_CHERRYVIEW(dev_priv)) {
3577		tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7_CHV);
3578		wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
3579		wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
3580
3581		tmp = intel_uncore_read(&dev_priv->uncore, DSPFW8_CHV);
3582		wm->pipe[PIPE_C].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEF);
3583		wm->pipe[PIPE_C].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEE);
3584
3585		tmp = intel_uncore_read(&dev_priv->uncore, DSPFW9_CHV);
3586		wm->pipe[PIPE_C].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEC);
3587		wm->pipe[PIPE_C].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORC);
3588
3589		tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM);
3590		wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
3591		wm->pipe[PIPE_C].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
3592		wm->pipe[PIPE_C].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
3593		wm->pipe[PIPE_C].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEC_HI) << 8;
3594		wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
3595		wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
3596		wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
3597		wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
3598		wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
3599		wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
3600	} else {
3601		tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7);
3602		wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
3603		wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
3604
3605		tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM);
3606		wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
3607		wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
3608		wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
3609		wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
3610		wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
3611		wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
3612		wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
3613	}
3614}
3615
3616#undef _FW_WM
3617#undef _FW_WM_VLV
3618
3619static void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
3620{
3621	struct g4x_wm_values *wm = &dev_priv->display.wm.g4x;
3622	struct intel_crtc *crtc;
3623
3624	g4x_read_wm_values(dev_priv, wm);
3625
3626	wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
3627
3628	for_each_intel_crtc(&dev_priv->drm, crtc) {
3629		struct intel_crtc_state *crtc_state =
3630			to_intel_crtc_state(crtc->base.state);
3631		struct g4x_wm_state *active = &crtc->wm.active.g4x;
3632		struct g4x_pipe_wm *raw;
3633		enum pipe pipe = crtc->pipe;
3634		enum plane_id plane_id;
3635		int level, max_level;
3636
3637		active->cxsr = wm->cxsr;
3638		active->hpll_en = wm->hpll_en;
3639		active->fbc_en = wm->fbc_en;
3640
3641		active->sr = wm->sr;
3642		active->hpll = wm->hpll;
3643
3644		for_each_plane_id_on_crtc(crtc, plane_id) {
3645			active->wm.plane[plane_id] =
3646				wm->pipe[pipe].plane[plane_id];
3647		}
3648
3649		if (wm->cxsr && wm->hpll_en)
3650			max_level = G4X_WM_LEVEL_HPLL;
3651		else if (wm->cxsr)
3652			max_level = G4X_WM_LEVEL_SR;
3653		else
3654			max_level = G4X_WM_LEVEL_NORMAL;
3655
3656		level = G4X_WM_LEVEL_NORMAL;
3657		raw = &crtc_state->wm.g4x.raw[level];
3658		for_each_plane_id_on_crtc(crtc, plane_id)
3659			raw->plane[plane_id] = active->wm.plane[plane_id];
3660
3661		level = G4X_WM_LEVEL_SR;
3662		if (level > max_level)
3663			goto out;
3664
3665		raw = &crtc_state->wm.g4x.raw[level];
3666		raw->plane[PLANE_PRIMARY] = active->sr.plane;
3667		raw->plane[PLANE_CURSOR] = active->sr.cursor;
3668		raw->plane[PLANE_SPRITE0] = 0;
3669		raw->fbc = active->sr.fbc;
3670
3671		level = G4X_WM_LEVEL_HPLL;
3672		if (level > max_level)
3673			goto out;
3674
3675		raw = &crtc_state->wm.g4x.raw[level];
3676		raw->plane[PLANE_PRIMARY] = active->hpll.plane;
3677		raw->plane[PLANE_CURSOR] = active->hpll.cursor;
3678		raw->plane[PLANE_SPRITE0] = 0;
3679		raw->fbc = active->hpll.fbc;
3680
3681		level++;
3682	out:
3683		for_each_plane_id_on_crtc(crtc, plane_id)
3684			g4x_raw_plane_wm_set(crtc_state, level,
3685					     plane_id, USHRT_MAX);
3686		g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX);
3687
3688		g4x_invalidate_wms(crtc, active, level);
3689
3690		crtc_state->wm.g4x.optimal = *active;
3691		crtc_state->wm.g4x.intermediate = *active;
3692
3693		drm_dbg_kms(&dev_priv->drm,
3694			    "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n",
3695			    pipe_name(pipe),
3696			    wm->pipe[pipe].plane[PLANE_PRIMARY],
3697			    wm->pipe[pipe].plane[PLANE_CURSOR],
3698			    wm->pipe[pipe].plane[PLANE_SPRITE0]);
3699	}
3700
3701	drm_dbg_kms(&dev_priv->drm,
3702		    "Initial SR watermarks: plane=%d, cursor=%d fbc=%d\n",
3703		    wm->sr.plane, wm->sr.cursor, wm->sr.fbc);
3704	drm_dbg_kms(&dev_priv->drm,
3705		    "Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n",
3706		    wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc);
3707	drm_dbg_kms(&dev_priv->drm, "Initial SR=%s HPLL=%s FBC=%s\n",
3708		    str_yes_no(wm->cxsr), str_yes_no(wm->hpll_en),
3709		    str_yes_no(wm->fbc_en));
3710}
3711
3712static void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
3713{
3714	struct intel_plane *plane;
3715	struct intel_crtc *crtc;
3716
3717	mutex_lock(&dev_priv->display.wm.wm_mutex);
3718
3719	for_each_intel_plane(&dev_priv->drm, plane) {
3720		struct intel_crtc *crtc =
3721			intel_crtc_for_pipe(dev_priv, plane->pipe);
3722		struct intel_crtc_state *crtc_state =
3723			to_intel_crtc_state(crtc->base.state);
3724		struct intel_plane_state *plane_state =
3725			to_intel_plane_state(plane->base.state);
3726		enum plane_id plane_id = plane->id;
3727		int level;
3728
3729		if (plane_state->uapi.visible)
3730			continue;
3731
3732		for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
3733			struct g4x_pipe_wm *raw =
3734				&crtc_state->wm.g4x.raw[level];
3735
3736			raw->plane[plane_id] = 0;
3737
3738			if (plane_id == PLANE_PRIMARY)
3739				raw->fbc = 0;
3740		}
3741	}
3742
3743	for_each_intel_crtc(&dev_priv->drm, crtc) {
3744		struct intel_crtc_state *crtc_state =
3745			to_intel_crtc_state(crtc->base.state);
3746		int ret;
3747
3748		ret = _g4x_compute_pipe_wm(crtc_state);
3749		drm_WARN_ON(&dev_priv->drm, ret);
3750
3751		crtc_state->wm.g4x.intermediate =
3752			crtc_state->wm.g4x.optimal;
3753		crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
3754	}
3755
3756	g4x_program_watermarks(dev_priv);
3757
3758	mutex_unlock(&dev_priv->display.wm.wm_mutex);
3759}
3760
3761static void g4x_wm_get_hw_state_and_sanitize(struct drm_i915_private *i915)
3762{
3763	g4x_wm_get_hw_state(i915);
3764	g4x_wm_sanitize(i915);
3765}
3766
3767static void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
3768{
3769	struct vlv_wm_values *wm = &dev_priv->display.wm.vlv;
3770	struct intel_crtc *crtc;
3771	u32 val;
3772
3773	vlv_read_wm_values(dev_priv, wm);
3774
3775	wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
3776	wm->level = VLV_WM_LEVEL_PM2;
3777
3778	if (IS_CHERRYVIEW(dev_priv)) {
3779		vlv_punit_get(dev_priv);
3780
3781		val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
3782		if (val & DSP_MAXFIFO_PM5_ENABLE)
3783			wm->level = VLV_WM_LEVEL_PM5;
3784
3785		/*
3786		 * If DDR DVFS is disabled in the BIOS, Punit
3787		 * will never ack the request. So if that happens
3788		 * assume we don't have to enable/disable DDR DVFS
3789		 * dynamically. To test that just set the REQ_ACK
3790		 * bit to poke the Punit, but don't change the
3791		 * HIGH/LOW bits so that we don't actually change
3792		 * the current state.
3793		 */
3794		val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
3795		val |= FORCE_DDR_FREQ_REQ_ACK;
3796		vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
3797
3798		if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
3799			      FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
3800			drm_dbg_kms(&dev_priv->drm,
3801				    "Punit not acking DDR DVFS request, "
3802				    "assuming DDR DVFS is disabled\n");
3803			dev_priv->display.wm.num_levels = VLV_WM_LEVEL_PM5 + 1;
3804		} else {
3805			val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
3806			if ((val & FORCE_DDR_HIGH_FREQ) == 0)
3807				wm->level = VLV_WM_LEVEL_DDR_DVFS;
3808		}
3809
3810		vlv_punit_put(dev_priv);
3811	}
3812
3813	for_each_intel_crtc(&dev_priv->drm, crtc) {
3814		struct intel_crtc_state *crtc_state =
3815			to_intel_crtc_state(crtc->base.state);
3816		struct vlv_wm_state *active = &crtc->wm.active.vlv;
3817		const struct vlv_fifo_state *fifo_state =
3818			&crtc_state->wm.vlv.fifo_state;
3819		enum pipe pipe = crtc->pipe;
3820		enum plane_id plane_id;
3821		int level;
3822
3823		vlv_get_fifo_size(crtc_state);
3824
3825		active->num_levels = wm->level + 1;
3826		active->cxsr = wm->cxsr;
3827
3828		for (level = 0; level < active->num_levels; level++) {
3829			struct g4x_pipe_wm *raw =
3830				&crtc_state->wm.vlv.raw[level];
3831
3832			active->sr[level].plane = wm->sr.plane;
3833			active->sr[level].cursor = wm->sr.cursor;
3834
3835			for_each_plane_id_on_crtc(crtc, plane_id) {
3836				active->wm[level].plane[plane_id] =
3837					wm->pipe[pipe].plane[plane_id];
3838
3839				raw->plane[plane_id] =
3840					vlv_invert_wm_value(active->wm[level].plane[plane_id],
3841							    fifo_state->plane[plane_id]);
3842			}
3843		}
3844
3845		for_each_plane_id_on_crtc(crtc, plane_id)
3846			vlv_raw_plane_wm_set(crtc_state, level,
3847					     plane_id, USHRT_MAX);
3848		vlv_invalidate_wms(crtc, active, level);
3849
3850		crtc_state->wm.vlv.optimal = *active;
3851		crtc_state->wm.vlv.intermediate = *active;
3852
3853		drm_dbg_kms(&dev_priv->drm,
3854			    "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
3855			    pipe_name(pipe),
3856			    wm->pipe[pipe].plane[PLANE_PRIMARY],
3857			    wm->pipe[pipe].plane[PLANE_CURSOR],
3858			    wm->pipe[pipe].plane[PLANE_SPRITE0],
3859			    wm->pipe[pipe].plane[PLANE_SPRITE1]);
3860	}
3861
3862	drm_dbg_kms(&dev_priv->drm,
3863		    "Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
3864		    wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
3865}
3866
3867static void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
3868{
3869	struct intel_plane *plane;
3870	struct intel_crtc *crtc;
3871
3872	mutex_lock(&dev_priv->display.wm.wm_mutex);
3873
3874	for_each_intel_plane(&dev_priv->drm, plane) {
3875		struct intel_crtc *crtc =
3876			intel_crtc_for_pipe(dev_priv, plane->pipe);
3877		struct intel_crtc_state *crtc_state =
3878			to_intel_crtc_state(crtc->base.state);
3879		struct intel_plane_state *plane_state =
3880			to_intel_plane_state(plane->base.state);
3881		enum plane_id plane_id = plane->id;
3882		int level;
3883
3884		if (plane_state->uapi.visible)
3885			continue;
3886
3887		for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
3888			struct g4x_pipe_wm *raw =
3889				&crtc_state->wm.vlv.raw[level];
3890
3891			raw->plane[plane_id] = 0;
3892		}
3893	}
3894
3895	for_each_intel_crtc(&dev_priv->drm, crtc) {
3896		struct intel_crtc_state *crtc_state =
3897			to_intel_crtc_state(crtc->base.state);
3898		int ret;
3899
3900		ret = _vlv_compute_pipe_wm(crtc_state);
3901		drm_WARN_ON(&dev_priv->drm, ret);
3902
3903		crtc_state->wm.vlv.intermediate =
3904			crtc_state->wm.vlv.optimal;
3905		crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
3906	}
3907
3908	vlv_program_watermarks(dev_priv);
3909
3910	mutex_unlock(&dev_priv->display.wm.wm_mutex);
3911}
3912
3913static void vlv_wm_get_hw_state_and_sanitize(struct drm_i915_private *i915)
3914{
3915	vlv_wm_get_hw_state(i915);
3916	vlv_wm_sanitize(i915);
3917}
3918
3919/*
3920 * FIXME should probably kill this and improve
3921 * the real watermark readout/sanitation instead
3922 */
3923static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
3924{
3925	intel_uncore_rmw(&dev_priv->uncore, WM3_LP_ILK, WM_LP_ENABLE, 0);
3926	intel_uncore_rmw(&dev_priv->uncore, WM2_LP_ILK, WM_LP_ENABLE, 0);
3927	intel_uncore_rmw(&dev_priv->uncore, WM1_LP_ILK, WM_LP_ENABLE, 0);
3928
3929	/*
3930	 * Don't touch WM_LP_SPRITE_ENABLE here.
3931	 * Doing so could cause underruns.
3932	 */
3933}
3934
3935static void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv)
3936{
3937	struct ilk_wm_values *hw = &dev_priv->display.wm.hw;
3938	struct intel_crtc *crtc;
3939
3940	ilk_init_lp_watermarks(dev_priv);
3941
3942	for_each_intel_crtc(&dev_priv->drm, crtc)
3943		ilk_pipe_wm_get_hw_state(crtc);
3944
3945	hw->wm_lp[0] = intel_uncore_read(&dev_priv->uncore, WM1_LP_ILK);
3946	hw->wm_lp[1] = intel_uncore_read(&dev_priv->uncore, WM2_LP_ILK);
3947	hw->wm_lp[2] = intel_uncore_read(&dev_priv->uncore, WM3_LP_ILK);
3948
3949	hw->wm_lp_spr[0] = intel_uncore_read(&dev_priv->uncore, WM1S_LP_ILK);
3950	if (DISPLAY_VER(dev_priv) >= 7) {
3951		hw->wm_lp_spr[1] = intel_uncore_read(&dev_priv->uncore, WM2S_LP_IVB);
3952		hw->wm_lp_spr[2] = intel_uncore_read(&dev_priv->uncore, WM3S_LP_IVB);
3953	}
3954
3955	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
3956		hw->partitioning = (intel_uncore_read(&dev_priv->uncore, WM_MISC) &
3957				    WM_MISC_DATA_PARTITION_5_6) ?
3958			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
3959	else if (IS_IVYBRIDGE(dev_priv))
3960		hw->partitioning = (intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2) &
3961				    DISP_DATA_PARTITION_5_6) ?
3962			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
3963
3964	hw->enable_fbc_wm =
3965		!(intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) & DISP_FBC_WM_DIS);
3966}
3967
3968static const struct intel_wm_funcs ilk_wm_funcs = {
3969	.compute_pipe_wm = ilk_compute_pipe_wm,
3970	.compute_intermediate_wm = ilk_compute_intermediate_wm,
3971	.initial_watermarks = ilk_initial_watermarks,
3972	.optimize_watermarks = ilk_optimize_watermarks,
3973	.get_hw_state = ilk_wm_get_hw_state,
3974};
3975
3976static const struct intel_wm_funcs vlv_wm_funcs = {
3977	.compute_pipe_wm = vlv_compute_pipe_wm,
3978	.compute_intermediate_wm = vlv_compute_intermediate_wm,
3979	.initial_watermarks = vlv_initial_watermarks,
3980	.optimize_watermarks = vlv_optimize_watermarks,
3981	.atomic_update_watermarks = vlv_atomic_update_fifo,
3982	.get_hw_state = vlv_wm_get_hw_state_and_sanitize,
3983};
3984
3985static const struct intel_wm_funcs g4x_wm_funcs = {
3986	.compute_pipe_wm = g4x_compute_pipe_wm,
3987	.compute_intermediate_wm = g4x_compute_intermediate_wm,
3988	.initial_watermarks = g4x_initial_watermarks,
3989	.optimize_watermarks = g4x_optimize_watermarks,
3990	.get_hw_state = g4x_wm_get_hw_state_and_sanitize,
3991};
3992
3993static const struct intel_wm_funcs pnv_wm_funcs = {
3994	.update_wm = pnv_update_wm,
3995};
3996
3997static const struct intel_wm_funcs i965_wm_funcs = {
3998	.update_wm = i965_update_wm,
3999};
4000
4001static const struct intel_wm_funcs i9xx_wm_funcs = {
4002	.update_wm = i9xx_update_wm,
4003};
4004
4005static const struct intel_wm_funcs i845_wm_funcs = {
4006	.update_wm = i845_update_wm,
4007};
4008
4009static const struct intel_wm_funcs nop_funcs = {
4010};
4011
4012void i9xx_wm_init(struct drm_i915_private *dev_priv)
4013{
4014	/* For FIFO watermark updates */
4015	if (HAS_PCH_SPLIT(dev_priv)) {
4016		ilk_setup_wm_latency(dev_priv);
4017		dev_priv->display.funcs.wm = &ilk_wm_funcs;
4018	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
4019		vlv_setup_wm_latency(dev_priv);
4020		dev_priv->display.funcs.wm = &vlv_wm_funcs;
4021	} else if (IS_G4X(dev_priv)) {
4022		g4x_setup_wm_latency(dev_priv);
4023		dev_priv->display.funcs.wm = &g4x_wm_funcs;
4024	} else if (IS_PINEVIEW(dev_priv)) {
4025		if (!intel_get_cxsr_latency(dev_priv)) {
4026			drm_info(&dev_priv->drm,
4027				 "failed to find known CxSR latency "
4028				 "(found ddr%s fsb freq %d, mem freq %d), "
4029				 "disabling CxSR\n",
4030				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
4031				 dev_priv->fsb_freq, dev_priv->mem_freq);
4032			/* Disable CxSR and never update its watermark again */
4033			intel_set_memory_cxsr(dev_priv, false);
4034			dev_priv->display.funcs.wm = &nop_funcs;
4035		} else {
4036			dev_priv->display.funcs.wm = &pnv_wm_funcs;
4037		}
4038	} else if (DISPLAY_VER(dev_priv) == 4) {
4039		dev_priv->display.funcs.wm = &i965_wm_funcs;
4040	} else if (DISPLAY_VER(dev_priv) == 3) {
4041		dev_priv->display.funcs.wm = &i9xx_wm_funcs;
4042	} else if (DISPLAY_VER(dev_priv) == 2) {
4043		if (INTEL_NUM_PIPES(dev_priv) == 1)
4044			dev_priv->display.funcs.wm = &i845_wm_funcs;
4045		else
4046			dev_priv->display.funcs.wm = &i9xx_wm_funcs;
4047	} else {
4048		drm_err(&dev_priv->drm,
4049			"unexpected fall-through in %s\n", __func__);
4050		dev_priv->display.funcs.wm = &nop_funcs;
4051	}
4052}
4053