// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_drrs.h"
#include "intel_panel.h"

/**
 * DOC: Display Refresh Rate Switching (DRRS)
 *
 * Display Refresh Rate Switching (DRRS) is a power conservation feature
 * which enables switching dynamically between low and high refresh rates
 * based on the usage scenario. This feature is applicable to internal
 * panels.
 *
 * Indication that the panel supports DRRS is given by the panel EDID, which
 * lists multiple refresh rates for one resolution.
 *
 * DRRS is of two types - static and seamless.
 * Static DRRS involves changing the refresh rate (RR) via a full modeset
 * (which may appear as a blink on screen) and is used in dock/undock
 * scenarios. Seamless DRRS involves changing the RR without any visual
 * effect to the user and can be used during normal system usage. This is
 * done by programming certain registers.
 *
 * Support for static/seamless DRRS may be indicated in the VBT based on
 * inputs from the panel spec.
 *
 * DRRS saves power by switching to the low RR based on usage scenarios.
 *
 * The implementation is based on frontbuffer tracking. When there is a
 * disturbance on the screen triggered by user activity or a periodic
 * system activity, DRRS is disabled (the RR is changed to the high RR).
 * When there is no movement on screen, after a timeout of 1 second, a
 * switch to the low RR is made.
 *
 * For integration with the frontbuffer tracking code, intel_drrs_invalidate()
 * and intel_drrs_flush() are called; see the sketch after this comment.
 *
 * DRRS can be further extended to support other internal panels and also
 * the scenario of video playback wherein the RR is set based on the rate
 * requested by userspace.
 */
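
/*
 * A minimal integration sketch, not taken verbatim from the frontbuffer
 * tracking code, assuming that code maintains a plane mask in
 * frontbuffer_bits:
 *
 *	// planes are about to be dirtied by rendering: force the high RR
 *	intel_drrs_invalidate(i915, frontbuffer_bits);
 *
 *	// rendering/flip completed: upclock once more and, if nothing else
 *	// is busy, re-arm the 1 second timer that drops back to the low RR
 *	intel_drrs_flush(i915, frontbuffer_bits);
 */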

const char *intel_drrs_type_str(enum drrs_type drrs_type)
{
	static const char * const str[] = {
		[DRRS_TYPE_NONE] = "none",
		[DRRS_TYPE_STATIC] = "static",
		[DRRS_TYPE_SEAMLESS] = "seamless",
	};

	if (drrs_type >= ARRAY_SIZE(str))
		return "<invalid>";

	return str[drrs_type];
}

static void
intel_drrs_set_refresh_rate_pipeconf(struct intel_crtc *crtc,
				     enum drrs_refresh_rate refresh_rate)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc->drrs.cpu_transcoder;
	u32 bit;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		bit = TRANSCONF_REFRESH_RATE_ALT_VLV;
	else
		bit = TRANSCONF_REFRESH_RATE_ALT_ILK;

	intel_de_rmw(dev_priv, TRANSCONF(cpu_transcoder),
		     bit, refresh_rate == DRRS_REFRESH_RATE_LOW ? bit : 0);
}

static void
intel_drrs_set_refresh_rate_m_n(struct intel_crtc *crtc,
				enum drrs_refresh_rate refresh_rate)
{
	intel_cpu_transcoder_set_m1_n1(crtc, crtc->drrs.cpu_transcoder,
				       refresh_rate == DRRS_REFRESH_RATE_LOW ?
				       &crtc->drrs.m2_n2 : &crtc->drrs.m_n);
}

bool intel_drrs_is_active(struct intel_crtc *crtc)
{
	return crtc->drrs.cpu_transcoder != INVALID_TRANSCODER;
}

static void intel_drrs_set_state(struct intel_crtc *crtc,
				 enum drrs_refresh_rate refresh_rate)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (refresh_rate == crtc->drrs.refresh_rate)
		return;

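	/*
	 * Transcoders that have a second set of M/N values programmed in
	 * hardware switch the rate by toggling the refresh rate select bit
	 * in TRANSCONF; everything else has its M1/N1 values rewritten
	 * directly with the alternate set.
	 */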
	if (intel_cpu_transcoder_has_m2_n2(dev_priv, crtc->drrs.cpu_transcoder))
		intel_drrs_set_refresh_rate_pipeconf(crtc, refresh_rate);
	else
		intel_drrs_set_refresh_rate_m_n(crtc, refresh_rate);

	crtc->drrs.refresh_rate = refresh_rate;
}

static void intel_drrs_schedule_work(struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);

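	/* (re)arm the 1 second idleness timeout described in the DOC comment */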
	mod_delayed_work(i915->unordered_wq, &crtc->drrs.work, msecs_to_jiffies(1000));
}

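/*
 * Frontbuffer bits for every pipe driven by this crtc, including any
 * bigjoiner slave pipes, so that activity on any of them keeps DRRS
 * upclocked.
 */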
static unsigned int intel_drrs_frontbuffer_bits(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	unsigned int frontbuffer_bits;

	frontbuffer_bits = INTEL_FRONTBUFFER_ALL_MASK(crtc->pipe);

	for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc,
					 crtc_state->bigjoiner_pipes)
		frontbuffer_bits |= INTEL_FRONTBUFFER_ALL_MASK(crtc->pipe);

	return frontbuffer_bits;
}

/**
 * intel_drrs_activate - activate DRRS
 * @crtc_state: the crtc state
 *
 * Activates DRRS on the crtc.
 */
void intel_drrs_activate(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	if (!crtc_state->has_drrs)
		return;

	if (!crtc_state->hw.active)
		return;

	if (intel_crtc_is_bigjoiner_slave(crtc_state))
		return;

	mutex_lock(&crtc->drrs.mutex);

	crtc->drrs.cpu_transcoder = crtc_state->cpu_transcoder;
	crtc->drrs.m_n = crtc_state->dp_m_n;
	crtc->drrs.m2_n2 = crtc_state->dp_m2_n2;
	crtc->drrs.frontbuffer_bits = intel_drrs_frontbuffer_bits(crtc_state);
	crtc->drrs.busy_frontbuffer_bits = 0;

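	/* arm the idleness timer so we downclock if nothing dirties the frontbuffer */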
	intel_drrs_schedule_work(crtc);

	mutex_unlock(&crtc->drrs.mutex);
}

/**
 * intel_drrs_deactivate - deactivate DRRS
 * @old_crtc_state: the old crtc state
 *
 * Deactivates DRRS on the crtc.
 */
void intel_drrs_deactivate(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);

	if (!old_crtc_state->has_drrs)
		return;

	if (!old_crtc_state->hw.active)
		return;

	if (intel_crtc_is_bigjoiner_slave(old_crtc_state))
		return;

	mutex_lock(&crtc->drrs.mutex);

	if (intel_drrs_is_active(crtc))
		intel_drrs_set_state(crtc, DRRS_REFRESH_RATE_HIGH);

	crtc->drrs.cpu_transcoder = INVALID_TRANSCODER;
	crtc->drrs.frontbuffer_bits = 0;
	crtc->drrs.busy_frontbuffer_bits = 0;

	mutex_unlock(&crtc->drrs.mutex);

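	/* the work item takes drrs.mutex, so only cancel it after dropping the lock */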
	cancel_delayed_work_sync(&crtc->drrs.work);
}

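/*
 * Delayed work that drops to the low refresh rate once the tracked
 * frontbuffers have stayed idle for the full timeout.
 */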
static void intel_drrs_downclock_work(struct work_struct *work)
{
	struct intel_crtc *crtc = container_of(work, typeof(*crtc), drrs.work.work);

	mutex_lock(&crtc->drrs.mutex);

	if (intel_drrs_is_active(crtc) && !crtc->drrs.busy_frontbuffer_bits)
		intel_drrs_set_state(crtc, DRRS_REFRESH_RATE_LOW);

	mutex_unlock(&crtc->drrs.mutex);
}

static void intel_drrs_frontbuffer_update(struct drm_i915_private *dev_priv,
					  unsigned int all_frontbuffer_bits,
					  bool invalidate)
{
	struct intel_crtc *crtc;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		unsigned int frontbuffer_bits;

		mutex_lock(&crtc->drrs.mutex);

		frontbuffer_bits = all_frontbuffer_bits & crtc->drrs.frontbuffer_bits;
		if (!frontbuffer_bits) {
			mutex_unlock(&crtc->drrs.mutex);
			continue;
		}

		if (invalidate)
			crtc->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
		else
			crtc->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;

		/* flush/invalidate means busy screen hence upclock */
		intel_drrs_set_state(crtc, DRRS_REFRESH_RATE_HIGH);

		/*
		 * flush also means no more activity hence schedule downclock, if all
		 * other fbs are quiescent too
		 */
		if (!crtc->drrs.busy_frontbuffer_bits)
			intel_drrs_schedule_work(crtc);
		else
			cancel_delayed_work(&crtc->drrs.work);

		mutex_unlock(&crtc->drrs.mutex);
	}
}

/**
 * intel_drrs_invalidate - Disable Idleness DRRS
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes starts,
 * so DRRS needs to be upclocked (LOW_RR -> HIGH_RR).
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_drrs_invalidate(struct drm_i915_private *dev_priv,
			   unsigned int frontbuffer_bits)
{
	intel_drrs_frontbuffer_update(dev_priv, frontbuffer_bits, true);
}

/**
 * intel_drrs_flush - Restart Idleness DRRS
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes has
 * completed, or a flip on a crtc has completed. DRRS is upclocked
 * (LOW_RR -> HIGH_RR) and, if no other planes are dirty, idleness
 * detection is restarted.
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_drrs_flush(struct drm_i915_private *dev_priv,
		      unsigned int frontbuffer_bits)
{
	intel_drrs_frontbuffer_update(dev_priv, frontbuffer_bits, false);
}

/**
 * intel_drrs_crtc_init - Init DRRS for CRTC
 * @crtc: crtc
 *
 * This function is called only once at driver load to initialize the basic
 * per-crtc DRRS state.
 */
void intel_drrs_crtc_init(struct intel_crtc *crtc)
{
	INIT_DELAYED_WORK(&crtc->drrs.work, intel_drrs_downclock_work);
	rw_init(&crtc->drrs.mutex, "drrs");
	crtc->drrs.cpu_transcoder = INVALID_TRANSCODER;
}

static int intel_drrs_debugfs_status_show(struct seq_file *m, void *unused)
{
	struct intel_crtc *crtc = m->private;
	const struct intel_crtc_state *crtc_state;
	int ret;

	ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
	if (ret)
		return ret;

	crtc_state = to_intel_crtc_state(crtc->base.state);

	mutex_lock(&crtc->drrs.mutex);

	seq_printf(m, "DRRS enabled: %s\n",
		   str_yes_no(crtc_state->has_drrs));

	seq_printf(m, "DRRS active: %s\n",
		   str_yes_no(intel_drrs_is_active(crtc)));

	seq_printf(m, "DRRS refresh rate: %s\n",
		   crtc->drrs.refresh_rate == DRRS_REFRESH_RATE_LOW ?
		   "low" : "high");

	seq_printf(m, "DRRS busy frontbuffer bits: 0x%x\n",
		   crtc->drrs.busy_frontbuffer_bits);

	mutex_unlock(&crtc->drrs.mutex);

	drm_modeset_unlock(&crtc->base.mutex);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(intel_drrs_debugfs_status);
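
/*
 * Example i915_drrs_status output (values are illustrative):
 *
 *	DRRS enabled: yes
 *	DRRS active: yes
 *	DRRS refresh rate: low
 *	DRRS busy frontbuffer bits: 0x0
 */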

static int intel_drrs_debugfs_ctl_set(void *data, u64 val)
{
	struct intel_crtc *crtc = data;
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state;
	struct drm_crtc_commit *commit;
	int ret;

	ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
	if (ret)
		return ret;

	crtc_state = to_intel_crtc_state(crtc->base.state);

	if (!crtc_state->hw.active ||
	    !crtc_state->has_drrs)
		goto out;

	commit = crtc_state->uapi.commit;
	if (commit) {
		ret = wait_for_completion_interruptible(&commit->hw_done);
		if (ret)
			goto out;
	}

	drm_dbg(&i915->drm,
		"Manually %sactivating DRRS\n", val ? "" : "de");

	if (val)
		intel_drrs_activate(crtc_state);
	else
		intel_drrs_deactivate(crtc_state);

out:
	drm_modeset_unlock(&crtc->base.mutex);

	return ret;
}

DEFINE_DEBUGFS_ATTRIBUTE(intel_drrs_debugfs_ctl_fops,
			 NULL, intel_drrs_debugfs_ctl_set, "%llu\n");

void intel_drrs_crtc_debugfs_add(struct intel_crtc *crtc)
{
	debugfs_create_file("i915_drrs_status", 0444, crtc->base.debugfs_entry,
			    crtc, &intel_drrs_debugfs_status_fops);

	debugfs_create_file_unsafe("i915_drrs_ctl", 0644, crtc->base.debugfs_entry,
				   crtc, &intel_drrs_debugfs_ctl_fops);
}
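
/*
 * A usage sketch, assuming debugfs is mounted at /sys/kernel/debug and the
 * DRM core names the per-crtc directory "crtc-<N>":
 *
 *	cat /sys/kernel/debug/dri/<card>/crtc-<N>/i915_drrs_status
 *	echo 0 > /sys/kernel/debug/dri/<card>/crtc-<N>/i915_drrs_ctl	# deactivate
 *	echo 1 > /sys/kernel/debug/dri/<card>/crtc-<N>/i915_drrs_ctl	# activate
 */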

static int intel_drrs_debugfs_type_show(struct seq_file *m, void *unused)
{
	struct intel_connector *connector = m->private;

	seq_printf(m, "DRRS type: %s\n",
		   intel_drrs_type_str(intel_panel_drrs_type(connector)));

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(intel_drrs_debugfs_type);

void intel_drrs_connector_debugfs_add(struct intel_connector *connector)
{
	if (intel_panel_drrs_type(connector) == DRRS_TYPE_NONE)
		return;

	debugfs_create_file("i915_drrs_type", 0444, connector->base.debugfs_entry,
			    connector, &intel_drrs_debugfs_type_fops);
}