/*	$NetBSD: radeon_pm.c,v 1.6 2021/12/18 23:45:43 riastradh Exp $	*/

/*
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rafał Miłecki <zajec5@gmail.com>
 *          Alex Deucher <alexdeucher@gmail.com>
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: radeon_pm.c,v 1.6 2021/12/18 23:45:43 riastradh Exp $");

#include <linux/hwmon-sysfs.h>
#include <linux/hwmon.h>
#include <linux/pci.h>
#include <linux/power_supply.h>

#include <drm/drm_debugfs.h>
#include <drm/drm_vblank.h>

#include "atom.h"
#include "avivod.h"
#include "r600_dpm.h"
#include "radeon.h"

#define RADEON_IDLE_LOOP_MS 100
#define RADEON_RECLOCK_DELAY_MS 200
#define RADEON_WAIT_VBLANK_TIMEOUT 200

static const char *radeon_pm_state_type_name[5] = {
	"",
	"Powersave",
	"Battery",
	"Balanced",
	"Performance",
};

static void radeon_dynpm_idle_work_handler(struct work_struct *work);
static int radeon_debugfs_pm_init(struct radeon_device *rdev);
static bool radeon_pm_in_vbl(struct radeon_device *rdev);
static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
static void radeon_pm_update_profile(struct radeon_device *rdev);
static void radeon_pm_set_clocks(struct radeon_device *rdev);

int radeon_pm_get_type_index(struct radeon_device *rdev,
			     enum radeon_pm_state_type ps_type,
			     int instance)
{
	int i;
	int found_instance = -1;

	for (i = 0; i < rdev->pm.num_power_states; i++) {
		if (rdev->pm.power_state[i].type == ps_type) {
			found_instance++;
			if (found_instance == instance)
				return i;
		}
	}
	/* return default if no match */
	return rdev->pm.default_power_state_index;
}
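
/*
 * Hypothetical usage example: look up the second battery state in the
 * table (instance counts matches of the requested type from zero):
 *
 *	idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
 *	ps = &rdev->pm.power_state[idx];
 *
 * If fewer than two battery states exist, idx is the default state
 * index, so the lookup always yields a usable state.
 */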

void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
{
	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
		mutex_lock(&rdev->pm.mutex);
		if (power_supply_is_system_supplied() > 0)
			rdev->pm.dpm.ac_power = true;
		else
			rdev->pm.dpm.ac_power = false;
		if (rdev->family == CHIP_ARUBA) {
			if (rdev->asic->dpm.enable_bapm)
				radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
		}
		mutex_unlock(&rdev->pm.mutex);
	} else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
		if (rdev->pm.profile == PM_PROFILE_AUTO) {
			mutex_lock(&rdev->pm.mutex);
			radeon_pm_update_profile(rdev);
			radeon_pm_set_clocks(rdev);
			mutex_unlock(&rdev->pm.mutex);
		}
	}
}

static void radeon_pm_update_profile(struct radeon_device *rdev)
{
	switch (rdev->pm.profile) {
	case PM_PROFILE_DEFAULT:
		rdev->pm.profile_index = PM_PROFILE_DEFAULT_IDX;
		break;
	case PM_PROFILE_AUTO:
		if (power_supply_is_system_supplied() > 0) {
			if (rdev->pm.active_crtc_count > 1)
				rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
			else
				rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
		} else {
			if (rdev->pm.active_crtc_count > 1)
				rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
			else
				rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
		}
		break;
	case PM_PROFILE_LOW:
		if (rdev->pm.active_crtc_count > 1)
			rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX;
		else
			rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
		break;
	case PM_PROFILE_MID:
		if (rdev->pm.active_crtc_count > 1)
			rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
		else
			rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
		break;
	case PM_PROFILE_HIGH:
		if (rdev->pm.active_crtc_count > 1)
			rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
		else
			rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
		break;
	}

	if (rdev->pm.active_crtc_count == 0) {
		rdev->pm.requested_power_state_index =
			rdev->pm.profiles[rdev->pm.profile_index].dpms_off_ps_idx;
		rdev->pm.requested_clock_mode_index =
			rdev->pm.profiles[rdev->pm.profile_index].dpms_off_cm_idx;
	} else {
		rdev->pm.requested_power_state_index =
			rdev->pm.profiles[rdev->pm.profile_index].dpms_on_ps_idx;
		rdev->pm.requested_clock_mode_index =
			rdev->pm.profiles[rdev->pm.profile_index].dpms_on_cm_idx;
	}
}
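
/*
 * Note on the mapping above: each non-default profile comes in a
 * single-head (SH) and a multi-head (MH) variant, selected by the
 * active crtc count (see the PM_PROFILE_*_SH/MH_IDX indices in
 * radeon.h).  AUTO is not a table entry of its own; it resolves to
 * HIGH on AC power and MID on battery.
 */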

static void radeon_unmap_vram_bos(struct radeon_device *rdev)
{
	struct radeon_bo *bo, *n;

	if (list_empty(&rdev->gem.objects))
		return;

	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
			ttm_bo_unmap_virtual(&bo->tbo);
	}
}

static void radeon_sync_with_vblank(struct radeon_device *rdev)
{
	if (rdev->pm.active_crtcs) {
#ifdef __NetBSD__
		int ret __unused;

		spin_lock(&rdev->irq.vblank_lock);
		rdev->pm.vblank_sync = false;
		DRM_SPIN_TIMED_WAIT_NOINTR_UNTIL(ret, &rdev->irq.vblank_queue,
		    &rdev->irq.vblank_lock,
		    msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT),
		    rdev->pm.vblank_sync);
		spin_unlock(&rdev->irq.vblank_lock);
#else
		rdev->pm.vblank_sync = false;
		wait_event_timeout(
			rdev->irq.vblank_queue, rdev->pm.vblank_sync,
			msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
#endif
	}
}
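
/*
 * Reclocking while a crtc is actively scanning out can cause visible
 * artifacts, so this waits for the vblank interrupt to flag
 * pm.vblank_sync (or gives up after RADEON_WAIT_VBLANK_TIMEOUT ms).
 * It only lines the switch up with a vblank; radeon_pm_in_vbl() is the
 * check that we are actually still inside the blanking interval.
 */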

static void radeon_set_power_state(struct radeon_device *rdev)
{
	u32 sclk, mclk;
	bool misc_after = false;

	if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
		return;

	if (radeon_gui_idle(rdev)) {
		sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
			clock_info[rdev->pm.requested_clock_mode_index].sclk;
		if (sclk > rdev->pm.default_sclk)
			sclk = rdev->pm.default_sclk;

		/* starting with BTC, there is one state that is used for both
		 * MH and SH.  The difference is that we always use the high
		 * clock index for mclk and vddci.
		 */
		if ((rdev->pm.pm_method == PM_METHOD_PROFILE) &&
		    (rdev->family >= CHIP_BARTS) &&
		    rdev->pm.active_crtc_count &&
		    ((rdev->pm.profile_index == PM_PROFILE_MID_MH_IDX) ||
		     (rdev->pm.profile_index == PM_PROFILE_LOW_MH_IDX)))
			mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
				clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].mclk;
		else
			mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
				clock_info[rdev->pm.requested_clock_mode_index].mclk;

		if (mclk > rdev->pm.default_mclk)
			mclk = rdev->pm.default_mclk;

		/* upvolt before raising clocks, downvolt after lowering clocks */
		if (sclk < rdev->pm.current_sclk)
			misc_after = true;

		radeon_sync_with_vblank(rdev);

		if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
			if (!radeon_pm_in_vbl(rdev))
				return;
		}

		radeon_pm_prepare(rdev);

		if (!misc_after)
			/* voltage, pcie lanes, etc. */
			radeon_pm_misc(rdev);

		/* set engine clock */
		if (sclk != rdev->pm.current_sclk) {
			radeon_pm_debug_check_in_vbl(rdev, false);
			radeon_set_engine_clock(rdev, sclk);
			radeon_pm_debug_check_in_vbl(rdev, true);
			rdev->pm.current_sclk = sclk;
			DRM_DEBUG_DRIVER("Setting: e: %d\n", sclk);
		}

		/* set memory clock */
		if (rdev->asic->pm.set_memory_clock && (mclk != rdev->pm.current_mclk)) {
			radeon_pm_debug_check_in_vbl(rdev, false);
			radeon_set_memory_clock(rdev, mclk);
			radeon_pm_debug_check_in_vbl(rdev, true);
			rdev->pm.current_mclk = mclk;
			DRM_DEBUG_DRIVER("Setting: m: %d\n", mclk);
		}

		if (misc_after)
			/* voltage, pcie lanes, etc. */
			radeon_pm_misc(rdev);

		radeon_pm_finish(rdev);

		rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index;
		rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index;
	} else
		DRM_DEBUG_DRIVER("pm: GUI not idle!!!\n");
}

static void radeon_pm_set_clocks(struct radeon_device *rdev)
{
	struct drm_crtc *crtc;
	int i, r;

	/* no need to take locks, etc. if nothing's going to change */
	if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
		return;

	down_write(&rdev->pm.mclk_lock);
	mutex_lock(&rdev->ring_lock);

	/* wait for the rings to drain */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		struct radeon_ring *ring = &rdev->ring[i];
		if (!ring->ready) {
			continue;
		}
		r = radeon_fence_wait_empty(rdev, i);
		if (r) {
			/* needs a GPU reset; don't reset here */
			mutex_unlock(&rdev->ring_lock);
			up_write(&rdev->pm.mclk_lock);
			return;
		}
	}

	radeon_unmap_vram_bos(rdev);

	if (rdev->irq.installed) {
		i = 0;
		drm_for_each_crtc(crtc, rdev->ddev) {
			if (rdev->pm.active_crtcs & (1 << i)) {
				/* This can fail if a modeset is in progress */
				if (drm_crtc_vblank_get(crtc) == 0)
					rdev->pm.req_vblank |= (1 << i);
				else
					DRM_DEBUG_DRIVER("crtc %d no vblank, can glitch\n",
							 i);
			}
			i++;
		}
	}

	radeon_set_power_state(rdev);

	if (rdev->irq.installed) {
		i = 0;
		drm_for_each_crtc(crtc, rdev->ddev) {
			if (rdev->pm.req_vblank & (1 << i)) {
				rdev->pm.req_vblank &= ~(1 << i);
				drm_crtc_vblank_put(crtc);
			}
			i++;
		}
	}

	/* update display watermarks based on new power state */
	radeon_update_bandwidth_info(rdev);
	if (rdev->pm.active_crtc_count)
		radeon_bandwidth_update(rdev);

	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;

	mutex_unlock(&rdev->ring_lock);
	up_write(&rdev->pm.mclk_lock);
}
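
/*
 * Lock ordering note: pm.mclk_lock (write) is taken before ring_lock
 * both here and in radeon_dpm_change_power_state_locked(); any new
 * path that needs both must use the same order to avoid deadlock.
 */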

static void radeon_pm_print_states(struct radeon_device *rdev)
{
	int i, j;
	struct radeon_power_state *power_state;
	struct radeon_pm_clock_info *clock_info;

	DRM_DEBUG_DRIVER("%d Power State(s)\n", rdev->pm.num_power_states);
	for (i = 0; i < rdev->pm.num_power_states; i++) {
		power_state = &rdev->pm.power_state[i];
		DRM_DEBUG_DRIVER("State %d: %s\n", i,
			radeon_pm_state_type_name[power_state->type]);
		if (i == rdev->pm.default_power_state_index)
			DRM_DEBUG_DRIVER("\tDefault");
		if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
			DRM_DEBUG_DRIVER("\t%d PCIE Lanes\n", power_state->pcie_lanes);
		if (power_state->flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
			DRM_DEBUG_DRIVER("\tSingle display only\n");
		DRM_DEBUG_DRIVER("\t%d Clock Mode(s)\n", power_state->num_clock_modes);
		for (j = 0; j < power_state->num_clock_modes; j++) {
			clock_info = &(power_state->clock_info[j]);
			if (rdev->flags & RADEON_IS_IGP)
				DRM_DEBUG_DRIVER("\t\t%d e: %d\n",
						 j,
						 clock_info->sclk * 10);
			else
				DRM_DEBUG_DRIVER("\t\t%d e: %d\tm: %d\tv: %d\n",
						 j,
						 clock_info->sclk * 10,
						 clock_info->mclk * 10,
						 clock_info->voltage.voltage);
		}
	}
}

#ifndef __NetBSD__		/* XXX radeon power */
static ssize_t radeon_get_pm_profile(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct radeon_device *rdev = ddev->dev_private;
	int cp = rdev->pm.profile;

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(cp == PM_PROFILE_AUTO) ? "auto" :
			(cp == PM_PROFILE_LOW) ? "low" :
			(cp == PM_PROFILE_MID) ? "mid" :
			(cp == PM_PROFILE_HIGH) ? "high" : "default");
}

static ssize_t radeon_set_pm_profile(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf,
				     size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct radeon_device *rdev = ddev->dev_private;

	/* Can't set profile when the card is off */
	if ((rdev->flags & RADEON_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	mutex_lock(&rdev->pm.mutex);
	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
		if (strncmp("default", buf, strlen("default")) == 0)
			rdev->pm.profile = PM_PROFILE_DEFAULT;
		else if (strncmp("auto", buf, strlen("auto")) == 0)
			rdev->pm.profile = PM_PROFILE_AUTO;
		else if (strncmp("low", buf, strlen("low")) == 0)
			rdev->pm.profile = PM_PROFILE_LOW;
		else if (strncmp("mid", buf, strlen("mid")) == 0)
			rdev->pm.profile = PM_PROFILE_MID;
		else if (strncmp("high", buf, strlen("high")) == 0)
			rdev->pm.profile = PM_PROFILE_HIGH;
		else {
			count = -EINVAL;
			goto fail;
		}
		radeon_pm_update_profile(rdev);
		radeon_pm_set_clocks(rdev);
	} else
		count = -EINVAL;

fail:
	mutex_unlock(&rdev->pm.mutex);

	return count;
}
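
/*
 * Typical usage from userland (hypothetical path, assuming the first
 * DRM device):
 *
 *	# echo low > /sys/class/drm/card0/device/power_profile
 *
 * Accepted values are "default", "auto", "low", "mid" and "high";
 * writes fail with EINVAL unless the profile method is active.
 */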

static ssize_t radeon_get_pm_method(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct radeon_device *rdev = ddev->dev_private;
	int pm = rdev->pm.pm_method;

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(pm == PM_METHOD_DYNPM) ? "dynpm" :
			(pm == PM_METHOD_PROFILE) ? "profile" : "dpm");
}

static ssize_t radeon_set_pm_method(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct radeon_device *rdev = ddev->dev_private;

	/* Can't set method when the card is off */
	if ((rdev->flags & RADEON_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
		count = -EINVAL;
		goto fail;
	}

	/* we don't support the legacy modes with dpm */
	if (rdev->pm.pm_method == PM_METHOD_DPM) {
		count = -EINVAL;
		goto fail;
	}

	if (strncmp("dynpm", buf, strlen("dynpm")) == 0) {
		mutex_lock(&rdev->pm.mutex);
		rdev->pm.pm_method = PM_METHOD_DYNPM;
		rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
		rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
		mutex_unlock(&rdev->pm.mutex);
	} else if (strncmp("profile", buf, strlen("profile")) == 0) {
		mutex_lock(&rdev->pm.mutex);
		/* disable dynpm */
		rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
		rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
		rdev->pm.pm_method = PM_METHOD_PROFILE;
		mutex_unlock(&rdev->pm.mutex);
		cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
	} else {
		count = -EINVAL;
		goto fail;
	}
	radeon_pm_compute_clocks(rdev);
fail:
	return count;
}
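
/*
 * Hypothetical usage example:
 *
 *	# echo dynpm > /sys/class/drm/card0/device/power_method
 *
 * Only "dynpm" and "profile" can be selected here; once a device is
 * running with dpm it cannot be switched back to the legacy methods
 * at runtime.
 */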

static ssize_t radeon_get_dpm_state(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct radeon_device *rdev = ddev->dev_private;
	enum radeon_pm_state_type pm = rdev->pm.dpm.user_state;

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
			(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
}

static ssize_t radeon_set_dpm_state(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct radeon_device *rdev = ddev->dev_private;

	mutex_lock(&rdev->pm.mutex);
	if (strncmp("battery", buf, strlen("battery")) == 0)
		rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY;
	else if (strncmp("balanced", buf, strlen("balanced")) == 0)
		rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
	else if (strncmp("performance", buf, strlen("performance")) == 0)
		rdev->pm.dpm.user_state = POWER_STATE_TYPE_PERFORMANCE;
	else {
		mutex_unlock(&rdev->pm.mutex);
		count = -EINVAL;
		goto fail;
	}
	mutex_unlock(&rdev->pm.mutex);

	/* Can't set dpm state when the card is off */
	if (!(rdev->flags & RADEON_IS_PX) ||
	    (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
		radeon_pm_compute_clocks(rdev);

fail:
	return count;
}
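
/*
 * Hypothetical usage example:
 *
 *	# echo battery > /sys/class/drm/card0/device/power_dpm_state
 *
 * This only records the user's requested state ("battery", "balanced"
 * or "performance"); the effective state can still be overridden
 * internally, e.g. by the thermal work handler.
 */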

static ssize_t radeon_get_dpm_forced_performance_level(struct device *dev,
						       struct device_attribute *attr,
						       char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct radeon_device *rdev = ddev->dev_private;
	enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;

	if ((rdev->flags & RADEON_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return snprintf(buf, PAGE_SIZE, "off\n");

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(level == RADEON_DPM_FORCED_LEVEL_AUTO) ? "auto" :
			(level == RADEON_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
}

static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev,
						       struct device_attribute *attr,
						       const char *buf,
						       size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct radeon_device *rdev = ddev->dev_private;
	enum radeon_dpm_forced_level level;
	int ret = 0;

	/* Can't force performance level when the card is off */
	if ((rdev->flags & RADEON_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	mutex_lock(&rdev->pm.mutex);
	if (strncmp("low", buf, strlen("low")) == 0) {
		level = RADEON_DPM_FORCED_LEVEL_LOW;
	} else if (strncmp("high", buf, strlen("high")) == 0) {
		level = RADEON_DPM_FORCED_LEVEL_HIGH;
	} else if (strncmp("auto", buf, strlen("auto")) == 0) {
		level = RADEON_DPM_FORCED_LEVEL_AUTO;
	} else {
		count = -EINVAL;
		goto fail;
	}
	if (rdev->asic->dpm.force_performance_level) {
		if (rdev->pm.dpm.thermal_active) {
			count = -EINVAL;
			goto fail;
		}
		ret = radeon_dpm_force_performance_level(rdev, level);
		if (ret)
			count = -EINVAL;
	}
fail:
	mutex_unlock(&rdev->pm.mutex);

	return count;
}

static ssize_t radeon_hwmon_get_pwm1_enable(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct radeon_device *rdev = dev_get_drvdata(dev);
	u32 pwm_mode = 0;

	if (rdev->asic->dpm.fan_ctrl_get_mode)
		pwm_mode = rdev->asic->dpm.fan_ctrl_get_mode(rdev);

	/* never 0 (full speed); always fuse- or smc-controlled */
	return sprintf(buf, "%i\n", pwm_mode == FDO_PWM_MODE_STATIC ? 1 : 2);
}

static ssize_t radeon_hwmon_set_pwm1_enable(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf,
					    size_t count)
{
	struct radeon_device *rdev = dev_get_drvdata(dev);
	int err;
	int value;

	if (!rdev->asic->dpm.fan_ctrl_set_mode)
		return -EINVAL;

	err = kstrtoint(buf, 10, &value);
	if (err)
		return err;

	switch (value) {
	case 1: /* manual, percent-based */
		rdev->asic->dpm.fan_ctrl_set_mode(rdev, FDO_PWM_MODE_STATIC);
		break;
	default: /* disable */
		rdev->asic->dpm.fan_ctrl_set_mode(rdev, 0);
		break;
	}

	return count;
}

static ssize_t radeon_hwmon_get_pwm1_min(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%i\n", 0);
}

static ssize_t radeon_hwmon_get_pwm1_max(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%i\n", 255);
}

static ssize_t radeon_hwmon_set_pwm1(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct radeon_device *rdev = dev_get_drvdata(dev);
	int err;
	u32 value;

	err = kstrtou32(buf, 10, &value);
	if (err)
		return err;

	value = (value * 100) / 255;

	err = rdev->asic->dpm.set_fan_speed_percent(rdev, value);
	if (err)
		return err;

	return count;
}
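
/*
 * The sysfs pwm1 interface is 0-255 while the asic hooks take a
 * percentage, hence the scaling here and in radeon_hwmon_get_pwm1()
 * below.  For example, writing 128 programs (128 * 100) / 255 = 50%,
 * and a fan at 50% reads back as (50 * 255) / 100 = 127; the round
 * trip can lose a step to integer truncation.
 */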

static ssize_t radeon_hwmon_get_pwm1(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct radeon_device *rdev = dev_get_drvdata(dev);
	int err;
	u32 speed;

	err = rdev->asic->dpm.get_fan_speed_percent(rdev, &speed);
	if (err)
		return err;

	speed = (speed * 255) / 100;

	return sprintf(buf, "%i\n", speed);
}

static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile);
static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method);
static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, radeon_get_dpm_state, radeon_set_dpm_state);
static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
		   radeon_get_dpm_forced_performance_level,
		   radeon_set_dpm_forced_performance_level);
#endif

#ifndef __NetBSD__		/* XXX radeon hwmon */
static ssize_t radeon_hwmon_show_temp(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct radeon_device *rdev = dev_get_drvdata(dev);
	struct drm_device *ddev = rdev->ddev;
	int temp;

	/* Can't get temperature when the card is off */
	if ((rdev->flags & RADEON_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	if (rdev->asic->pm.get_temperature)
		temp = radeon_get_temperature(rdev);
	else
		temp = 0;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	struct radeon_device *rdev = dev_get_drvdata(dev);
	int hyst = to_sensor_dev_attr(attr)->index;
	int temp;

	if (hyst)
		temp = rdev->pm.dpm.thermal.min_temp;
	else
		temp = rdev->pm.dpm.thermal.max_temp;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, radeon_hwmon_get_pwm1, radeon_hwmon_set_pwm1, 0);
static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, radeon_hwmon_get_pwm1_enable, radeon_hwmon_set_pwm1_enable, 0);
static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, radeon_hwmon_get_pwm1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, radeon_hwmon_get_pwm1_max, NULL, 0);

static struct attribute *hwmon_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
	&sensor_dev_attr_pwm1.dev_attr.attr,
	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
	&sensor_dev_attr_pwm1_min.dev_attr.attr,
	&sensor_dev_attr_pwm1_max.dev_attr.attr,
	NULL
};

static umode_t hwmon_attributes_visible(struct kobject *kobj,
					struct attribute *attr, int index)
{
	struct device *dev = kobj_to_dev(kobj);
	struct radeon_device *rdev = dev_get_drvdata(dev);
	umode_t effective_mode = attr->mode;

	/* Skip attributes if DPM is not enabled */
	if (rdev->pm.pm_method != PM_METHOD_DPM &&
	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	/* Skip fan attributes if fan is not present */
	if (rdev->pm.no_fan &&
	    (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	/* mask fan attributes if we have no bindings for this asic to expose */
	if ((!rdev->asic->dpm.get_fan_speed_percent &&
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
	    (!rdev->asic->dpm.fan_ctrl_get_mode &&
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
		effective_mode &= ~S_IRUGO;

	if ((!rdev->asic->dpm.set_fan_speed_percent &&
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
	    (!rdev->asic->dpm.fan_ctrl_set_mode &&
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
		effective_mode &= ~S_IWUSR;

	/* hide max/min values if we can't both query and manage the fan */
	if ((!rdev->asic->dpm.set_fan_speed_percent &&
	     !rdev->asic->dpm.get_fan_speed_percent) &&
	    (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	return effective_mode;
}

static const struct attribute_group hwmon_attrgroup = {
	.attrs = hwmon_attributes,
	.is_visible = hwmon_attributes_visible,
};

static const struct attribute_group *hwmon_groups[] = {
	&hwmon_attrgroup,
	NULL
};
#endif

static int radeon_hwmon_init(struct radeon_device *rdev)
{
	int err = 0;

#ifndef __NetBSD__		/* XXX radeon hwmon */
	switch (rdev->pm.int_thermal_type) {
	case THERMAL_TYPE_RV6XX:
	case THERMAL_TYPE_RV770:
	case THERMAL_TYPE_EVERGREEN:
	case THERMAL_TYPE_NI:
	case THERMAL_TYPE_SUMO:
	case THERMAL_TYPE_SI:
	case THERMAL_TYPE_CI:
	case THERMAL_TYPE_KV:
		if (rdev->asic->pm.get_temperature == NULL)
			return err;
		rdev->pm.int_hwmon_dev = hwmon_device_register_with_groups(rdev->dev,
									   "radeon", rdev,
									   hwmon_groups);
		if (IS_ERR(rdev->pm.int_hwmon_dev)) {
			err = PTR_ERR(rdev->pm.int_hwmon_dev);
			dev_err(rdev->dev,
				"Unable to register hwmon device: %d\n", err);
		}
		break;
	default:
		break;
	}
#endif

	return err;
}

static void radeon_hwmon_fini(struct radeon_device *rdev)
{
#ifndef __NetBSD__		/* XXX radeon hwmon */
	if (rdev->pm.int_hwmon_dev)
		hwmon_device_unregister(rdev->pm.int_hwmon_dev);
#endif
}

static void radeon_dpm_thermal_work_handler(struct work_struct *work)
{
	struct radeon_device *rdev =
		container_of(work, struct radeon_device,
			     pm.dpm.thermal.work);
	/* switch to the thermal state */
	enum radeon_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;

	if (!rdev->pm.dpm_enabled)
		return;

	if (rdev->asic->pm.get_temperature) {
		int temp = radeon_get_temperature(rdev);

		if (temp < rdev->pm.dpm.thermal.min_temp)
			/* switch back the user state */
			dpm_state = rdev->pm.dpm.user_state;
	} else {
		if (rdev->pm.dpm.thermal.high_to_low)
			/* switch back the user state */
			dpm_state = rdev->pm.dpm.user_state;
	}
	mutex_lock(&rdev->pm.mutex);
	if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
		rdev->pm.dpm.thermal_active = true;
	else
		rdev->pm.dpm.thermal_active = false;
	rdev->pm.dpm.state = dpm_state;
	mutex_unlock(&rdev->pm.mutex);

	radeon_pm_compute_clocks(rdev);
}

static bool radeon_dpm_single_display(struct radeon_device *rdev)
{
	bool single_display = rdev->pm.dpm.new_active_crtc_count < 2;

	/* check if the vblank period is too short to adjust the mclk */
	if (single_display && rdev->asic->dpm.vblank_too_short) {
		if (radeon_dpm_vblank_too_short(rdev))
			single_display = false;
	}

	/* 120 Hz modes tend to be problematic even when they are under
	 * the vblank limit.
	 */
	if (single_display && (r600_dpm_get_vrefresh(rdev) >= 120))
		single_display = false;

	return single_display;
}

static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
						     enum radeon_pm_state_type dpm_state)
{
	int i;
	struct radeon_ps *ps;
	u32 ui_class;
	bool single_display = radeon_dpm_single_display(rdev);

	/* certain older asics have a separate 3D performance state,
	 * so try that first if the user selected performance
	 */
	if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
	/* balanced states don't exist at the moment */
	if (dpm_state == POWER_STATE_TYPE_BALANCED)
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;

restart_search:
	/* Pick the best power state based on current conditions */
	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
		ps = &rdev->pm.dpm.ps[i];
		ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
		switch (dpm_state) {
		/* user states */
		case POWER_STATE_TYPE_BATTERY:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_BALANCED:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_PERFORMANCE:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		/* internal states */
		case POWER_STATE_TYPE_INTERNAL_UVD:
			if (rdev->pm.dpm.uvd_ps)
				return rdev->pm.dpm.uvd_ps;
			else
				break;
		case POWER_STATE_TYPE_INTERNAL_UVD_SD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_BOOT:
			return rdev->pm.dpm.boot_ps;
		case POWER_STATE_TYPE_INTERNAL_THERMAL:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ACPI:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ULV:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_3DPERF:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
				return ps;
			break;
		default:
			break;
		}
	}
	/* use a fallback state if we didn't match */
	switch (dpm_state) {
	case POWER_STATE_TYPE_INTERNAL_UVD_SD:
		dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_UVD_HD:
	case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
	case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
		if (rdev->pm.dpm.uvd_ps) {
			return rdev->pm.dpm.uvd_ps;
		} else {
			dpm_state = POWER_STATE_TYPE_PERFORMANCE;
			goto restart_search;
		}
	case POWER_STATE_TYPE_INTERNAL_THERMAL:
		dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_ACPI:
		dpm_state = POWER_STATE_TYPE_BATTERY;
		goto restart_search;
	case POWER_STATE_TYPE_BATTERY:
	case POWER_STATE_TYPE_BALANCED:
	case POWER_STATE_TYPE_INTERNAL_3DPERF:
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
		goto restart_search;
	default:
		break;
	}

	return NULL;
}
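
/*
 * Fallback chain for the search above when nothing matches: UVD SD
 * falls back to UVD HD, the UVD HD variants fall back to uvd_ps or
 * performance, thermal falls back to ACPI, ACPI to battery, and
 * battery/balanced/3D-perf to performance.  NULL is returned only if
 * even that fails, in which case the caller keeps the current state.
 */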

static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
{
	int i;
	struct radeon_ps *ps;
	enum radeon_pm_state_type dpm_state;
	int ret;
	bool single_display = radeon_dpm_single_display(rdev);

	/* if dpm init failed */
	if (!rdev->pm.dpm_enabled)
		return;

	if (rdev->pm.dpm.user_state != rdev->pm.dpm.state) {
		/* add other state override checks here */
		if ((!rdev->pm.dpm.thermal_active) &&
		    (!rdev->pm.dpm.uvd_active))
			rdev->pm.dpm.state = rdev->pm.dpm.user_state;
	}
	dpm_state = rdev->pm.dpm.state;

	ps = radeon_dpm_pick_power_state(rdev, dpm_state);
	if (ps)
		rdev->pm.dpm.requested_ps = ps;
	else
		return;

	/* no need to reprogram if nothing changed unless we are on BTC+ */
	if (rdev->pm.dpm.current_ps == rdev->pm.dpm.requested_ps) {
		/* vce just modifies an existing state so force a change */
		if (ps->vce_active != rdev->pm.dpm.vce_active)
			goto force;
		/* user has made a display change (such as timing) */
		if (rdev->pm.dpm.single_display != single_display)
			goto force;
		if ((rdev->family < CHIP_BARTS) || (rdev->flags & RADEON_IS_IGP)) {
			/* for pre-BTC and APUs if the num crtcs changed but state is the same,
			 * all we need to do is update the display configuration.
			 */
			if (rdev->pm.dpm.new_active_crtcs != rdev->pm.dpm.current_active_crtcs) {
				/* update display watermarks based on new power state */
				radeon_bandwidth_update(rdev);
				/* update displays */
				radeon_dpm_display_configuration_changed(rdev);
				rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
				rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
			}
			return;
		} else {
			/* for BTC+ if the num crtcs hasn't changed and state is the same,
			 * nothing to do, if the num crtcs is > 1 and state is the same,
			 * update display configuration.
			 */
			if (rdev->pm.dpm.new_active_crtcs ==
			    rdev->pm.dpm.current_active_crtcs) {
				return;
			} else {
				if ((rdev->pm.dpm.current_active_crtc_count > 1) &&
				    (rdev->pm.dpm.new_active_crtc_count > 1)) {
					/* update display watermarks based on new power state */
					radeon_bandwidth_update(rdev);
					/* update displays */
					radeon_dpm_display_configuration_changed(rdev);
					rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
					rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
					return;
				}
			}
		}
	}

force:
	if (radeon_dpm == 1) {
		printk("switching from power state:\n");
		radeon_dpm_print_power_state(rdev, rdev->pm.dpm.current_ps);
		printk("switching to power state:\n");
		radeon_dpm_print_power_state(rdev, rdev->pm.dpm.requested_ps);
	}

	down_write(&rdev->pm.mclk_lock);
	mutex_lock(&rdev->ring_lock);

	/* update whether vce is active */
	ps->vce_active = rdev->pm.dpm.vce_active;

	ret = radeon_dpm_pre_set_power_state(rdev);
	if (ret)
		goto done;

	/* update display watermarks based on new power state */
	radeon_bandwidth_update(rdev);
	/* update displays */
	radeon_dpm_display_configuration_changed(rdev);

	/* wait for the rings to drain */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		struct radeon_ring *ring = &rdev->ring[i];
		if (ring->ready)
			radeon_fence_wait_empty(rdev, i);
	}

	/* program the new power state */
	radeon_dpm_set_power_state(rdev);

	/* update current power state */
	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps;

	radeon_dpm_post_set_power_state(rdev);

	rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
	rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
	rdev->pm.dpm.single_display = single_display;

	if (rdev->asic->dpm.force_performance_level) {
		if (rdev->pm.dpm.thermal_active) {
			enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
			/* force low perf level for thermal */
			radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW);
			/* save the user's level */
			rdev->pm.dpm.forced_level = level;
		} else {
			/* otherwise, user selected level */
			radeon_dpm_force_performance_level(rdev, rdev->pm.dpm.forced_level);
		}
	}

done:
	mutex_unlock(&rdev->ring_lock);
	up_write(&rdev->pm.mclk_lock);
}

void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
{
	enum radeon_pm_state_type dpm_state;

	if (rdev->asic->dpm.powergate_uvd) {
		mutex_lock(&rdev->pm.mutex);
		/* don't powergate anything if we
		   have active but paused streams */
		enable |= rdev->pm.dpm.sd > 0;
		enable |= rdev->pm.dpm.hd > 0;
		/* enable/disable UVD */
		radeon_dpm_powergate_uvd(rdev, !enable);
		mutex_unlock(&rdev->pm.mutex);
	} else {
		if (enable) {
			mutex_lock(&rdev->pm.mutex);
			rdev->pm.dpm.uvd_active = true;
			/* disable this for now */
#if 0
			if ((rdev->pm.dpm.sd == 1) && (rdev->pm.dpm.hd == 0))
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_SD;
			else if ((rdev->pm.dpm.sd == 2) && (rdev->pm.dpm.hd == 0))
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
			else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 1))
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
			else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 2))
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD2;
			else
#endif
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD;
			rdev->pm.dpm.state = dpm_state;
			mutex_unlock(&rdev->pm.mutex);
		} else {
			mutex_lock(&rdev->pm.mutex);
			rdev->pm.dpm.uvd_active = false;
			mutex_unlock(&rdev->pm.mutex);
		}

		radeon_pm_compute_clocks(rdev);
	}
}

void radeon_dpm_enable_vce(struct radeon_device *rdev, bool enable)
{
	if (enable) {
		mutex_lock(&rdev->pm.mutex);
		rdev->pm.dpm.vce_active = true;
		/* XXX select vce level based on ring/task */
		rdev->pm.dpm.vce_level = RADEON_VCE_LEVEL_AC_ALL;
		mutex_unlock(&rdev->pm.mutex);
	} else {
		mutex_lock(&rdev->pm.mutex);
		rdev->pm.dpm.vce_active = false;
		mutex_unlock(&rdev->pm.mutex);
	}

	radeon_pm_compute_clocks(rdev);
}

static void radeon_pm_suspend_old(struct radeon_device *rdev)
{
	mutex_lock(&rdev->pm.mutex);
	if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
		if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE)
			rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED;
	}
	mutex_unlock(&rdev->pm.mutex);

	cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
}

static void radeon_pm_suspend_dpm(struct radeon_device *rdev)
{
	mutex_lock(&rdev->pm.mutex);
	/* disable dpm */
	radeon_dpm_disable(rdev);
	/* reset the power state */
	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
	rdev->pm.dpm_enabled = false;
	mutex_unlock(&rdev->pm.mutex);
}

void radeon_pm_suspend(struct radeon_device *rdev)
{
	if (rdev->pm.pm_method == PM_METHOD_DPM)
		radeon_pm_suspend_dpm(rdev);
	else
		radeon_pm_suspend_old(rdev);
}

static void radeon_pm_resume_old(struct radeon_device *rdev)
{
	/* set up the default clocks if the MC ucode is loaded */
	if ((rdev->family >= CHIP_BARTS) &&
	    (rdev->family <= CHIP_CAYMAN) &&
	    rdev->mc_fw) {
		if (rdev->pm.default_vddc)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
						SET_VOLTAGE_TYPE_ASIC_VDDC);
		if (rdev->pm.default_vddci)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
						SET_VOLTAGE_TYPE_ASIC_VDDCI);
		if (rdev->pm.default_sclk)
			radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
		if (rdev->pm.default_mclk)
			radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
	}
	/* asic init will reset the default power state */
	mutex_lock(&rdev->pm.mutex);
	rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
	rdev->pm.current_clock_mode_index = 0;
	rdev->pm.current_sclk = rdev->pm.default_sclk;
	rdev->pm.current_mclk = rdev->pm.default_mclk;
	if (rdev->pm.power_state) {
		rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
		rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
	}
	if (rdev->pm.pm_method == PM_METHOD_DYNPM
	    && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
		rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
		schedule_delayed_work(&rdev->pm.dynpm_idle_work,
				      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
	}
	mutex_unlock(&rdev->pm.mutex);
	radeon_pm_compute_clocks(rdev);
}

static void radeon_pm_resume_dpm(struct radeon_device *rdev)
{
	int ret;

	/* asic init will reset to the boot state */
	mutex_lock(&rdev->pm.mutex);
	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
	radeon_dpm_setup_asic(rdev);
	ret = radeon_dpm_enable(rdev);
	mutex_unlock(&rdev->pm.mutex);
	if (ret)
		goto dpm_resume_fail;
	rdev->pm.dpm_enabled = true;
	return;

dpm_resume_fail:
	DRM_ERROR("radeon: dpm resume failed\n");
	if ((rdev->family >= CHIP_BARTS) &&
	    (rdev->family <= CHIP_CAYMAN) &&
	    rdev->mc_fw) {
		if (rdev->pm.default_vddc)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
						SET_VOLTAGE_TYPE_ASIC_VDDC);
		if (rdev->pm.default_vddci)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
						SET_VOLTAGE_TYPE_ASIC_VDDCI);
		if (rdev->pm.default_sclk)
			radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
		if (rdev->pm.default_mclk)
			radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
	}
}

void radeon_pm_resume(struct radeon_device *rdev)
{
	if (rdev->pm.pm_method == PM_METHOD_DPM)
		radeon_pm_resume_dpm(rdev);
	else
		radeon_pm_resume_old(rdev);
}

static int radeon_pm_init_old(struct radeon_device *rdev)
{
	int ret;

	rdev->pm.profile = PM_PROFILE_DEFAULT;
	rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;
	rdev->pm.default_sclk = rdev->clock.default_sclk;
	rdev->pm.default_mclk = rdev->clock.default_mclk;
	rdev->pm.current_sclk = rdev->clock.default_sclk;
	rdev->pm.current_mclk = rdev->clock.default_mclk;
	rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;

	if (rdev->bios) {
		if (rdev->is_atom_bios)
			radeon_atombios_get_power_modes(rdev);
		else
			radeon_combios_get_power_modes(rdev);
		radeon_pm_print_states(rdev);
		radeon_pm_init_profile(rdev);
		/* set up the default clocks if the MC ucode is loaded */
		if ((rdev->family >= CHIP_BARTS) &&
		    (rdev->family <= CHIP_CAYMAN) &&
		    rdev->mc_fw) {
			if (rdev->pm.default_vddc)
				radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
							SET_VOLTAGE_TYPE_ASIC_VDDC);
			if (rdev->pm.default_vddci)
				radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
							SET_VOLTAGE_TYPE_ASIC_VDDCI);
			if (rdev->pm.default_sclk)
				radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
			if (rdev->pm.default_mclk)
				radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
		}
	}

	/* set up the internal thermal sensor if applicable */
	ret = radeon_hwmon_init(rdev);
	if (ret)
		return ret;

	INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);

#ifndef __NetBSD__		/* XXX radeon power */
	if (rdev->pm.num_power_states > 1) {
		if (radeon_debugfs_pm_init(rdev)) {
			DRM_ERROR("Failed to register debugfs file for PM!\n");
		}

		DRM_INFO("radeon: power management initialized\n");
	}
#endif

	return 0;
}

static void radeon_dpm_print_power_states(struct radeon_device *rdev)
{
	int i;

	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
		printk("== power state %d ==\n", i);
		radeon_dpm_print_power_state(rdev, &rdev->pm.dpm.ps[i]);
	}
}

static int radeon_pm_init_dpm(struct radeon_device *rdev)
{
	int ret;

	/* default to balanced state */
	rdev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
	rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
	rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
	rdev->pm.default_sclk = rdev->clock.default_sclk;
	rdev->pm.default_mclk = rdev->clock.default_mclk;
	rdev->pm.current_sclk = rdev->clock.default_sclk;
	rdev->pm.current_mclk = rdev->clock.default_mclk;
	rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;

	if (rdev->bios && rdev->is_atom_bios)
		radeon_atombios_get_power_modes(rdev);
	else
		return -EINVAL;

	/* set up the internal thermal sensor if applicable */
	ret = radeon_hwmon_init(rdev);
	if (ret)
		return ret;

	INIT_WORK(&rdev->pm.dpm.thermal.work, radeon_dpm_thermal_work_handler);
	mutex_lock(&rdev->pm.mutex);
	radeon_dpm_init(rdev);
	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
	if (radeon_dpm == 1)
		radeon_dpm_print_power_states(rdev);
	radeon_dpm_setup_asic(rdev);
	ret = radeon_dpm_enable(rdev);
	mutex_unlock(&rdev->pm.mutex);
	if (ret)
		goto dpm_failed;
	rdev->pm.dpm_enabled = true;

	if (radeon_debugfs_pm_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for dpm!\n");
	}

	DRM_INFO("radeon: dpm initialized\n");

	return 0;

dpm_failed:
	rdev->pm.dpm_enabled = false;
	if ((rdev->family >= CHIP_BARTS) &&
	    (rdev->family <= CHIP_CAYMAN) &&
	    rdev->mc_fw) {
		if (rdev->pm.default_vddc)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
						SET_VOLTAGE_TYPE_ASIC_VDDC);
		if (rdev->pm.default_vddci)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
						SET_VOLTAGE_TYPE_ASIC_VDDCI);
		if (rdev->pm.default_sclk)
			radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
		if (rdev->pm.default_mclk)
			radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
	}
	DRM_ERROR("radeon: dpm initialization failed\n");
	return ret;
}

struct radeon_dpm_quirk {
	u32 chip_vendor;
	u32 chip_device;
	u32 subsys_vendor;
	u32 subsys_device;
};

/* cards with dpm stability problems */
static struct radeon_dpm_quirk radeon_dpm_quirk_list[] = {
	/* TURKS - https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1386534 */
	{ PCI_VENDOR_ID_ATI, 0x6759, 0x1682, 0x3195 },
	/* TURKS - https://bugzilla.kernel.org/show_bug.cgi?id=83731 */
	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1179, 0xfb81 },
	{ 0, 0, 0, 0 },
};
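
/*
 * To quirk another board, add its IDs before the all-zero terminator,
 * e.g. (hypothetical subsystem IDs):
 *
 *	{ PCI_VENDOR_ID_ATI, 0x6759, 0x1043, 0x1234 },
 *
 * A match only disables dpm in the auto case (radeon_dpm == -1); an
 * explicit radeon_dpm=1 still overrides the quirk.
 */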

int radeon_pm_init(struct radeon_device *rdev)
{
	struct radeon_dpm_quirk *p = radeon_dpm_quirk_list;
	bool disable_dpm = false;

	/* Apply dpm quirks */
	while (p && p->chip_device != 0) {
		if (rdev->pdev->vendor == p->chip_vendor &&
		    rdev->pdev->device == p->chip_device &&
		    rdev->pdev->subsystem_vendor == p->subsys_vendor &&
		    rdev->pdev->subsystem_device == p->subsys_device) {
			disable_dpm = true;
			break;
		}
		++p;
	}

	/* enable dpm on rv6xx+ */
	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RV630:
	case CHIP_RV620:
	case CHIP_RV635:
	case CHIP_RV670:
	case CHIP_RS780:
	case CHIP_RS880:
	case CHIP_RV770:
		/* DPM requires the RLC, RV770+ dGPU requires SMC */
		if (!rdev->rlc_fw)
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else if ((rdev->family >= CHIP_RV770) &&
			 (!(rdev->flags & RADEON_IS_IGP)) &&
			 (!rdev->smc_fw))
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else if (radeon_dpm == 1)
			rdev->pm.pm_method = PM_METHOD_DPM;
		else
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		break;
	case CHIP_RV730:
	case CHIP_RV710:
	case CHIP_RV740:
	case CHIP_CEDAR:
	case CHIP_REDWOOD:
	case CHIP_JUNIPER:
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
	case CHIP_PALM:
	case CHIP_SUMO:
	case CHIP_SUMO2:
	case CHIP_BARTS:
	case CHIP_TURKS:
	case CHIP_CAICOS:
	case CHIP_CAYMAN:
	case CHIP_ARUBA:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
	case CHIP_HAINAN:
	case CHIP_BONAIRE:
	case CHIP_KABINI:
	case CHIP_KAVERI:
	case CHIP_HAWAII:
	case CHIP_MULLINS:
		/* DPM requires the RLC, RV770+ dGPU requires SMC */
		if (!rdev->rlc_fw)
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else if ((rdev->family >= CHIP_RV770) &&
			 (!(rdev->flags & RADEON_IS_IGP)) &&
			 (!rdev->smc_fw))
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else if (disable_dpm && (radeon_dpm == -1))
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else if (radeon_dpm == 0)
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else
			rdev->pm.pm_method = PM_METHOD_DPM;
		break;
	default:
		/* default to profile method */
		rdev->pm.pm_method = PM_METHOD_PROFILE;
		break;
	}

	if (rdev->pm.pm_method == PM_METHOD_DPM)
		return radeon_pm_init_dpm(rdev);
	else
		return radeon_pm_init_old(rdev);
}
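
/*
 * The radeon_dpm module parameter steers the selection above: 1 forces
 * dpm on (including the rv6xx/rv770 group, which otherwise defaults to
 * the profile method), 0 forces it off, and -1 (auto) enables dpm for
 * the chips in the second case block unless a quirk entry matched.  In
 * every case the RLC firmware (plus SMC on RV770+ dGPUs) is a hard
 * requirement for dpm.
 */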
1565
1566int radeon_pm_late_init(struct radeon_device *rdev)
1567{
1568	int ret = 0;
1569
1570	if (rdev->pm.pm_method == PM_METHOD_DPM) {
1571		if (rdev->pm.dpm_enabled) {
1572#ifndef __NetBSD__		/* XXX radeon sysfs */
1573			if (!rdev->pm.sysfs_initialized) {
1574				ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state);
1575				if (ret)
1576					DRM_ERROR("failed to create device file for dpm state\n");
1577				ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
1578				if (ret)
1579					DRM_ERROR("failed to create device file for dpm state\n");
1580				/* XXX: these are noops for dpm but are here for backwards compat */
1581				ret = device_create_file(rdev->dev, &dev_attr_power_profile);
1582				if (ret)
1583					DRM_ERROR("failed to create device file for power profile\n");
1584				ret = device_create_file(rdev->dev, &dev_attr_power_method);
1585				if (ret)
1586					DRM_ERROR("failed to create device file for power method\n");
1587				rdev->pm.sysfs_initialized = true;
1588			}
1589#endif
1590
1591			mutex_lock(&rdev->pm.mutex);
1592			ret = radeon_dpm_late_enable(rdev);
1593			mutex_unlock(&rdev->pm.mutex);
1594			if (ret) {
1595				rdev->pm.dpm_enabled = false;
1596				DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
1597			} else {
1598				/* set the dpm state for PX since there won't be
1599				 * a modeset to call this.
1600				 */
1601				radeon_pm_compute_clocks(rdev);
1602			}
1603		}
1604	} else {
1605		if ((rdev->pm.num_power_states > 1) &&
1606		    (!rdev->pm.sysfs_initialized)) {
1607#ifndef __NetBSD__	     /* XXX radeon sysfs */
1608			/* where's the best place to put these? */
1609			ret = device_create_file(rdev->dev, &dev_attr_power_profile);
1610			if (ret)
1611				DRM_ERROR("failed to create device file for power profile\n");
1612			ret = device_create_file(rdev->dev, &dev_attr_power_method);
1613			if (ret)
1614				DRM_ERROR("failed to create device file for power method\n");
1615			if (!ret)
1616				rdev->pm.sysfs_initialized = true;
1617#endif
1618		}
1619	}
1620	return ret;
1621}
1622
1623static void radeon_pm_fini_old(struct radeon_device *rdev)
1624{
1625	if (rdev->pm.num_power_states > 1) {
1626		mutex_lock(&rdev->pm.mutex);
1627		if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
1628			rdev->pm.profile = PM_PROFILE_DEFAULT;
1629			radeon_pm_update_profile(rdev);
1630			radeon_pm_set_clocks(rdev);
1631		} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
1632			/* reset default clocks */
1633			rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
1634			rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
1635			radeon_pm_set_clocks(rdev);
1636		}
1637		mutex_unlock(&rdev->pm.mutex);
1638
1639		cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
1640
1641#ifndef __NetBSD__		/* XXX radeon power */
1642		device_remove_file(rdev->dev, &dev_attr_power_profile);
1643		device_remove_file(rdev->dev, &dev_attr_power_method);
1644#endif
1645	}
1646
1647	radeon_hwmon_fini(rdev);
1648	kfree(rdev->pm.power_state);
1649}
1650
1651static void radeon_pm_fini_dpm(struct radeon_device *rdev)
1652{
1653	if (rdev->pm.num_power_states > 1) {
1654		mutex_lock(&rdev->pm.mutex);
1655		radeon_dpm_disable(rdev);
1656		mutex_unlock(&rdev->pm.mutex);
1657
1658#ifndef __NetBSD__		/* XXX radeon power */
1659		device_remove_file(rdev->dev, &dev_attr_power_dpm_state);
1660		device_remove_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
1661		/* XXX backwards compat */
1662		device_remove_file(rdev->dev, &dev_attr_power_profile);
1663		device_remove_file(rdev->dev, &dev_attr_power_method);
1664#endif
1665	}
1666	radeon_dpm_fini(rdev);
1667
1668	radeon_hwmon_fini(rdev);
1669	kfree(rdev->pm.power_state);
1670}
1671
1672void radeon_pm_fini(struct radeon_device *rdev)
1673{
1674	if (rdev->pm.pm_method == PM_METHOD_DPM)
1675		radeon_pm_fini_dpm(rdev);
1676	else
1677		radeon_pm_fini_old(rdev);
1678}
1679
static void radeon_pm_compute_clocks_old(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;

	if (rdev->pm.num_power_states < 2)
		return;

	mutex_lock(&rdev->pm.mutex);

	rdev->pm.active_crtcs = 0;
	rdev->pm.active_crtc_count = 0;
	if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc,
				    &ddev->mode_config.crtc_list, head) {
			radeon_crtc = to_radeon_crtc(crtc);
			if (radeon_crtc->enabled) {
				rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
				rdev->pm.active_crtc_count++;
			}
		}
	}

	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
		radeon_pm_update_profile(rdev);
		radeon_pm_set_clocks(rdev);
	} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
		if (rdev->pm.dynpm_state != DYNPM_STATE_DISABLED) {
			if (rdev->pm.active_crtc_count > 1) {
				if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
					cancel_delayed_work(&rdev->pm.dynpm_idle_work);

					rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);

					DRM_DEBUG_DRIVER("radeon: dynamic power management deactivated\n");
				}
			} else if (rdev->pm.active_crtc_count == 1) {
				/* TODO: Increase clocks if needed for current mode */

				if (rdev->pm.dynpm_state == DYNPM_STATE_MINIMUM) {
					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_UPCLOCK;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);

					schedule_delayed_work(&rdev->pm.dynpm_idle_work,
							      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
				} else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) {
					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
					schedule_delayed_work(&rdev->pm.dynpm_idle_work,
							      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
					DRM_DEBUG_DRIVER("radeon: dynamic power management activated\n");
				}
			} else { /* count == 0 */
				if (rdev->pm.dynpm_state != DYNPM_STATE_MINIMUM) {
					cancel_delayed_work(&rdev->pm.dynpm_idle_work);

					rdev->pm.dynpm_state = DYNPM_STATE_MINIMUM;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_MINIMUM;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);
				}
			}
		}
	}

	mutex_unlock(&rdev->pm.mutex);
}

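/*
 * radeon_pm_compute_clocks_dpm - DPM clock recalculation
 *
 * Recounts the active crtcs into new_active_crtcs/new_active_crtc_count,
 * refreshes the AC/battery status, and asks the DPM code to pick a new
 * power state, all under the pm mutex.
 */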
static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;

	if (!rdev->pm.dpm_enabled)
		return;

	mutex_lock(&rdev->pm.mutex);

	/* update active crtc counts */
	rdev->pm.dpm.new_active_crtcs = 0;
	rdev->pm.dpm.new_active_crtc_count = 0;
	if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc,
				    &ddev->mode_config.crtc_list, head) {
			radeon_crtc = to_radeon_crtc(crtc);
			if (crtc->enabled) {
				rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id);
				rdev->pm.dpm.new_active_crtc_count++;
			}
		}
	}

	/* update battery/ac status */
	if (power_supply_is_system_supplied() > 0)
		rdev->pm.dpm.ac_power = true;
	else
		rdev->pm.dpm.ac_power = false;

	radeon_dpm_change_power_state_locked(rdev);

	mutex_unlock(&rdev->pm.mutex);
}

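/*
 * radeon_pm_compute_clocks - update the power state for the current
 * display configuration
 *
 * Called on modeset and similar events; dispatches to the DPM or
 * legacy implementation.
 */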
void radeon_pm_compute_clocks(struct radeon_device *rdev)
{
	if (rdev->pm.pm_method == PM_METHOD_DPM)
		radeon_pm_compute_clocks_dpm(rdev);
	else
		radeon_pm_compute_clocks_old(rdev);
}

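/*
 * radeon_pm_in_vbl - check whether a reclock is safe right now
 *
 * Returns true only if every active crtc is currently inside its
 * vertical blanking interval (crtcs whose scanout position cannot be
 * queried are ignored), so that changing clocks will not cause
 * visible artifacts.
 */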
static bool radeon_pm_in_vbl(struct radeon_device *rdev)
{
	int crtc, vpos, hpos, vbl_status;
	bool in_vbl = true;

	/* Iterate over all active CRTCs.  All CRTCs must be in vblank,
	 * otherwise return in_vbl == false.
	 */
	for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
		if (rdev->pm.active_crtcs & (1 << crtc)) {
			vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev,
								crtc,
								USE_REAL_VBLANKSTART,
								&vpos, &hpos, NULL, NULL,
								&rdev->mode_info.crtcs[crtc]->base.hwmode);
			if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
			    !(vbl_status & DRM_SCANOUTPOS_IN_VBLANK))
				in_vbl = false;
		}
	}

	return in_vbl;
}

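/*
 * radeon_pm_debug_check_in_vbl - radeon_pm_in_vbl with debug logging
 *
 * Same check as radeon_pm_in_vbl(), but logs the active crtc mask when
 * the check fails; @finish distinguishes the exit of a reclock from
 * its entry.
 */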
static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
{
	bool in_vbl = radeon_pm_in_vbl(rdev);

	if (!in_vbl)
		DRM_DEBUG_DRIVER("not in vbl for pm change %08x at %s\n",
				 rdev->pm.active_crtcs, finish ? "exit" : "entry");
	return in_vbl;
}

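/*
 * radeon_dynpm_idle_work_handler - periodic dynpm reclock decision
 *
 * Runs every RADEON_IDLE_LOOP_MS while dynpm is active.  Counts the
 * fences still outstanding across all ready rings: three or more
 * pending fences plan an upclock, a fully idle GPU plans a downclock,
 * and the planned action is applied once RADEON_RECLOCK_DELAY_MS has
 * elapsed.
 */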
static void radeon_dynpm_idle_work_handler(struct work_struct *work)
{
	struct radeon_device *rdev;
	int resched;

	rdev = container_of(work, struct radeon_device,
			    pm.dynpm_idle_work.work);

	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
	mutex_lock(&rdev->pm.mutex);
	if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
		int not_processed = 0;
		int i;

		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			struct radeon_ring *ring = &rdev->ring[i];

			if (ring->ready) {
				not_processed += radeon_fence_count_emitted(rdev, i);
				if (not_processed >= 3)
					break;
			}
		}

		if (not_processed >= 3) { /* should upclock */
			if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_DOWNCLOCK) {
				rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
			} else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
				   rdev->pm.dynpm_can_upclock) {
				rdev->pm.dynpm_planned_action =
					DYNPM_ACTION_UPCLOCK;
				rdev->pm.dynpm_action_timeout = jiffies +
					msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
			}
		} else if (not_processed == 0) { /* should downclock */
			if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_UPCLOCK) {
				rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
			} else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
				   rdev->pm.dynpm_can_downclock) {
				rdev->pm.dynpm_planned_action =
					DYNPM_ACTION_DOWNCLOCK;
				rdev->pm.dynpm_action_timeout = jiffies +
					msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
			}
		}

		/* Note, radeon_pm_set_clocks is called with static_switch set
		 * to false since we want to wait for vbl to avoid flicker.
		 * Use time_after() so the timeout comparison is safe across
		 * jiffies wraparound.
		 */
		if (rdev->pm.dynpm_planned_action != DYNPM_ACTION_NONE &&
		    time_after(jiffies, rdev->pm.dynpm_action_timeout)) {
			radeon_pm_get_dynpm_state(rdev);
			radeon_pm_set_clocks(rdev);
		}

		schedule_delayed_work(&rdev->pm.dynpm_idle_work,
				      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
	}
	mutex_unlock(&rdev->pm.mutex);
	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
}


/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

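/*
 * radeon_debugfs_pm_info - dump the current power state to debugfs
 *
 * Prints the per-asic DPM performance level when DPM is enabled,
 * otherwise the default/current engine and memory clocks, voltage
 * and PCIE lane count, guarding against powered-off PX asics.
 */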
static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct drm_device *ddev = rdev->ddev;

	if ((rdev->flags & RADEON_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
		seq_printf(m, "PX asic powered off\n");
	} else if (rdev->pm.dpm_enabled) {
		mutex_lock(&rdev->pm.mutex);
		if (rdev->asic->dpm.debugfs_print_current_performance_level)
			radeon_dpm_debugfs_print_current_performance_level(rdev, m);
		else
			seq_printf(m, "Debugfs support not implemented for this asic\n");
		mutex_unlock(&rdev->pm.mutex);
	} else {
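		/* sclk/mclk are stored in units of 10 kHz; the literal
		 * '0' appended after %u scales the printed value to kHz. */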
		seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
		/* radeon_get_engine_clock is not reliable on APUs so just print the current clock */
		if ((rdev->family >= CHIP_PALM) && (rdev->flags & RADEON_IS_IGP))
			seq_printf(m, "current engine clock: %u0 kHz\n", rdev->pm.current_sclk);
		else
			seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
		seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
		if (rdev->asic->pm.get_memory_clock)
			seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
		if (rdev->pm.current_vddc)
			seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc);
		if (rdev->asic->pm.get_pcie_lanes)
			seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));
	}

	return 0;
}

static struct drm_info_list radeon_pm_info_list[] = {
	{"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL},
};
#endif

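/*
 * radeon_debugfs_pm_init - register the radeon_pm_info debugfs file
 *
 * No-op when the kernel is built without CONFIG_DEBUG_FS.
 */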
static int radeon_debugfs_pm_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list));
#else
	return 0;
#endif
}