// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include "i915_drv.h"
#include "i9xx_wm.h"
#include "intel_display_types.h"
#include "intel_wm.h"
#include "skl_watermark.h"

/**
 * intel_update_watermarks - update FIFO watermark values based on current modes
 * @i915: i915 device
 *
 * Calculate watermark values for the various WM regs based on current mode
 * and plane configuration.
 *
 * There are several cases to deal with here:
 *   - normal (i.e. non-self-refresh)
 *   - self-refresh (SR) mode
 *   - lines are large relative to FIFO size (buffer can hold up to 2)
 *   - lines are small relative to FIFO size (buffer can hold more than 2
 *     lines), so need to account for TLB latency
 *
 *   The normal calculation is:
 *     watermark = dotclock * bytes per pixel * latency
 *   where latency is platform & configuration dependent (we assume pessimal
 *   values here).
 *
 *   The SR calculation is:
 *     watermark = (trunc(latency/line time)+1) * surface width *
 *       bytes per pixel
 *   where
 *     line time = htotal / dotclock
 *     surface width = hdisplay for normal plane and 64 for cursor
 *   and latency is assumed to be high, as above.
 *
 * The final value programmed to the register should always be rounded up,
 * and include an extra 2 entries to account for clock crossings.
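 *
 * As a purely illustrative example (made-up numbers, not taken from any
 * real platform): with a 100 MHz dotclock, 4 bytes per pixel and a 16 usec
 * latency, the FIFO has to absorb roughly 100 px/usec * 4 B/px * 16 usec =
 * 6400 bytes before the memory request completes; that figure is then
 * converted to FIFO entries, rounded up, and padded with the 2 extra
 * entries mentioned above.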
 *
 * We don't use the sprite, so we can ignore that.  And on Crestline we have
 * to set the non-SR watermarks to 8.
 */
void intel_update_watermarks(struct drm_i915_private *i915)
{
	if (i915->display.funcs.wm->update_wm)
		i915->display.funcs.wm->update_wm(i915);
}

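/*
 * Compute the pipe watermarks for @crtc from the state in @state. Platforms
 * without a compute_pipe_wm hook have nothing to compute here.
 */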
int intel_compute_pipe_wm(struct intel_atomic_state *state,
			  struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);

	if (i915->display.funcs.wm->compute_pipe_wm)
		return i915->display.funcs.wm->compute_pipe_wm(state, crtc);

	return 0;
}

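/*
 * Compute the intermediate watermarks for @crtc, i.e. values that remain
 * safe for both the old and the new hardware state while the update is in
 * flight. Only platforms that also provide compute_pipe_wm can have an
 * intermediate step, hence the WARN below.
 */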
int intel_compute_intermediate_wm(struct intel_atomic_state *state,
				  struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);

	if (!i915->display.funcs.wm->compute_intermediate_wm)
		return 0;

	if (drm_WARN_ON(&i915->drm, !i915->display.funcs.wm->compute_pipe_wm))
		return 0;

	return i915->display.funcs.wm->compute_intermediate_wm(state, crtc);
}

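/*
 * Program the platform's initial watermarks for @crtc, if it has an
 * initial_watermarks hook. Returns true if the hook exists and was called.
 */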
bool intel_initial_watermarks(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);

	if (i915->display.funcs.wm->initial_watermarks) {
		i915->display.funcs.wm->initial_watermarks(state, crtc);
		return true;
	}

	return false;
}

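/* Update the watermarks for @crtc as part of the atomic commit. */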
void intel_atomic_update_watermarks(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);

	if (i915->display.funcs.wm->atomic_update_watermarks)
		i915->display.funcs.wm->atomic_update_watermarks(state, crtc);
}

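/*
 * Switch to the optimal watermarks once the atomic commit has completed, on
 * platforms that distinguish intermediate and optimal watermarks.
 */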
void intel_optimize_watermarks(struct intel_atomic_state *state,
			       struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);

	if (i915->display.funcs.wm->optimize_watermarks)
		i915->display.funcs.wm->optimize_watermarks(state, crtc);
}

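/*
 * Compute watermark state that spans all pipes rather than a single crtc.
 * Platforms without such state have no hook here and simply return 0.
 */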
int intel_compute_global_watermarks(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);

	if (i915->display.funcs.wm->compute_global_watermarks)
		return i915->display.funcs.wm->compute_global_watermarks(state);

	return 0;
}

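/* Read out the watermark state currently programmed into the hardware. */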
void intel_wm_get_hw_state(struct drm_i915_private *i915)
{
	if (i915->display.funcs.wm->get_hw_state)
		i915->display.funcs.wm->get_hw_state(i915);
}

bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
			    const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);

	/* FIXME check the 'enable' instead */
	if (!crtc_state->hw.active)
		return false;

	/*
	 * Treat cursor with fb as always visible since cursor updates
	 * can happen faster than the vrefresh rate, and the current
	 * watermark code doesn't handle that correctly. Cursor updates
	 * which set/clear the fb or change the cursor size are going
	 * to get throttled by intel_legacy_cursor_update() to work
	 * around this problem with the watermark code.
	 */
	if (plane->id == PLANE_CURSOR)
		return plane_state->hw.fb != NULL;
	else
		return plane_state->uapi.visible;
}

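/*
 * Dump a set of watermark latency values to the kernel log. Values are
 * printed with 0.1 usec resolution.
 */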
void intel_print_wm_latency(struct drm_i915_private *dev_priv,
			    const char *name, const u16 wm[])
{
	int level;

	for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
		unsigned int latency = wm[level];

		if (latency == 0) {
			drm_dbg_kms(&dev_priv->drm,
				    "%s WM%d latency not provided\n",
				    name, level);
			continue;
		}

		/*
		 * - latencies are in us on gen9+
		 * - before then, WM1+ latency values are in 0.5us units
		 *
		 * Scale to 0.1us units for the printout below.
		 */
		if (DISPLAY_VER(dev_priv) >= 9)
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		drm_dbg_kms(&dev_priv->drm,
			    "%s WM%d latency %u (%u.%u usec)\n", name, level,
			    wm[level], latency / 10, latency % 10);
	}
}

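/* Select the SKL+ or the pre-SKL watermark code for this device. */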
void intel_wm_init(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 9)
		skl_wm_init(i915);
	else
		i9xx_wm_init(i915);
}

static void wm_latency_show(struct seq_file *m, const u16 wm[8])
{
	struct drm_i915_private *dev_priv = m->private;
	int level;

	drm_modeset_lock_all(&dev_priv->drm);

	for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
		unsigned int latency = wm[level];

		/*
		 * - latencies are in us on gen9+, vlv/chv and g4x
		 * - before then, WM1+ latency values are in 0.5us units
		 */
		if (DISPLAY_VER(dev_priv) >= 9 ||
		    IS_VALLEYVIEW(dev_priv) ||
		    IS_CHERRYVIEW(dev_priv) ||
		    IS_G4X(dev_priv))
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		seq_printf(m, "WM%d %u (%u.%u usec)\n",
			   level, wm[level], latency / 10, latency % 10);
	}

	drm_modeset_unlock_all(&dev_priv->drm);
}

static int pri_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	const u16 *latencies;

	if (DISPLAY_VER(dev_priv) >= 9)
		latencies = dev_priv->display.wm.skl_latency;
	else
		latencies = dev_priv->display.wm.pri_latency;

	wm_latency_show(m, latencies);

	return 0;
}

static int spr_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	const u16 *latencies;

	if (DISPLAY_VER(dev_priv) >= 9)
		latencies = dev_priv->display.wm.skl_latency;
	else
		latencies = dev_priv->display.wm.spr_latency;

	wm_latency_show(m, latencies);

	return 0;
}

static int cur_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	const u16 *latencies;

	if (DISPLAY_VER(dev_priv) >= 9)
		latencies = dev_priv->display.wm.skl_latency;
	else
		latencies = dev_priv->display.wm.cur_latency;

	wm_latency_show(m, latencies);

	return 0;
}

static int pri_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
		return -ENODEV;

	return single_open(file, pri_wm_latency_show, dev_priv);
}

static int spr_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (HAS_GMCH(dev_priv))
		return -ENODEV;

	return single_open(file, spr_wm_latency_show, dev_priv);
}

static int cur_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (HAS_GMCH(dev_priv))
		return -ENODEV;

	return single_open(file, cur_wm_latency_show, dev_priv);
}

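/*
 * Parse a new set of watermark latencies written through debugfs. Exactly
 * wm.num_levels values are expected on a single line, e.g. with purely
 * illustrative numbers on a platform with 8 WM levels:
 *
 *   echo "2 4 8 16 32 64 96 128" > i915_pri_wm_latency
 */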
static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
				size_t len, loff_t *offp, u16 wm[8])
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	u16 new[8] = {};
	int level;
	int ret;
	char tmp[32];

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
		     &new[0], &new[1], &new[2], &new[3],
		     &new[4], &new[5], &new[6], &new[7]);
	if (ret != dev_priv->display.wm.num_levels)
		return -EINVAL;

	drm_modeset_lock_all(&dev_priv->drm);

	for (level = 0; level < dev_priv->display.wm.num_levels; level++)
		wm[level] = new[level];

	drm_modeset_unlock_all(&dev_priv->drm);

	return len;
}

static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	u16 *latencies;

	if (DISPLAY_VER(dev_priv) >= 9)
		latencies = dev_priv->display.wm.skl_latency;
	else
		latencies = dev_priv->display.wm.pri_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	u16 *latencies;

	if (DISPLAY_VER(dev_priv) >= 9)
		latencies = dev_priv->display.wm.skl_latency;
	else
		latencies = dev_priv->display.wm.spr_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	u16 *latencies;

	if (DISPLAY_VER(dev_priv) >= 9)
		latencies = dev_priv->display.wm.skl_latency;
	else
		latencies = dev_priv->display.wm.cur_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static const struct file_operations i915_pri_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = pri_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = pri_wm_latency_write
};

static const struct file_operations i915_spr_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = spr_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = spr_wm_latency_write
};

static const struct file_operations i915_cur_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = cur_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = cur_wm_latency_write
};

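/*
 * Register the i915_{pri,spr,cur}_wm_latency debugfs files, plus whatever
 * additional entries the SKL+ watermark code provides.
 */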
void intel_wm_debugfs_register(struct drm_i915_private *i915)
{
	struct drm_minor *minor = i915->drm.primary;

	debugfs_create_file("i915_pri_wm_latency", 0644, minor->debugfs_root,
			    i915, &i915_pri_wm_latency_fops);

	debugfs_create_file("i915_spr_wm_latency", 0644, minor->debugfs_root,
			    i915, &i915_spr_wm_latency_fops);

	debugfs_create_file("i915_cur_wm_latency", 0644, minor->debugfs_root,
			    i915, &i915_cur_wm_latency_fops);

	skl_watermark_debugfs_register(i915);
}