/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_mode.h"
#include "r600d.h"
#include "atom.h"
#include "avivod.h"

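/* Microcode lengths below are in 32-bit dwords, one per CP/RLC ucode write. */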
#define PFP_UCODE_SIZE 576
#define PM4_UCODE_SIZE 1792
#define RLC_UCODE_SIZE 768
#define R700_PFP_UCODE_SIZE 848
#define R700_PM4_UCODE_SIZE 1360
#define R700_RLC_UCODE_SIZE 1024
#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
#define EVERGREEN_RLC_UCODE_SIZE 768

/* Firmware Names */
MODULE_FIRMWARE("radeon/R600_pfp.bin");
MODULE_FIRMWARE("radeon/R600_me.bin");
MODULE_FIRMWARE("radeon/RV610_pfp.bin");
MODULE_FIRMWARE("radeon/RV610_me.bin");
MODULE_FIRMWARE("radeon/RV630_pfp.bin");
MODULE_FIRMWARE("radeon/RV630_me.bin");
MODULE_FIRMWARE("radeon/RV620_pfp.bin");
MODULE_FIRMWARE("radeon/RV620_me.bin");
MODULE_FIRMWARE("radeon/RV635_pfp.bin");
MODULE_FIRMWARE("radeon/RV635_me.bin");
MODULE_FIRMWARE("radeon/RV670_pfp.bin");
MODULE_FIRMWARE("radeon/RV670_me.bin");
MODULE_FIRMWARE("radeon/RS780_pfp.bin");
MODULE_FIRMWARE("radeon/RS780_me.bin");
MODULE_FIRMWARE("radeon/RV770_pfp.bin");
MODULE_FIRMWARE("radeon/RV770_me.bin");
MODULE_FIRMWARE("radeon/RV730_pfp.bin");
MODULE_FIRMWARE("radeon/RV730_me.bin");
MODULE_FIRMWARE("radeon/RV710_pfp.bin");
MODULE_FIRMWARE("radeon/RV710_me.bin");
MODULE_FIRMWARE("radeon/R600_rlc.bin");
MODULE_FIRMWARE("radeon/R700_rlc.bin");
MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
MODULE_FIRMWARE("radeon/CEDAR_me.bin");
MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");

int r600_debugfs_mc_info_init(struct radeon_device *rdev);

/* r600,rv610,rv630,rv620,rv635,rv670 */
int r600_mc_wait_for_idle(struct radeon_device *rdev);
void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
void r600_irq_disable(struct radeon_device *rdev);

/* get temperature in millidegrees */
u32 rv6xx_get_temp(struct radeon_device *rdev)
{
	u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
		ASIC_T_SHIFT;

	return temp * 1000;
}

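/*
 * Pick the power state and clock mode to transition to for dynamic pm,
 * based on the planned action (minimum, downclock, upclock, default).
 * The result is stored in rdev->pm.requested_power_state_index and
 * rdev->pm.requested_clock_mode_index.
 */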
void r600_pm_get_dynpm_state(struct radeon_device *rdev)
{
	int i;

	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;

	/* power state array is low to high, default is first */
	if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
		int min_power_state_index = 0;

		if (rdev->pm.num_power_states > 2)
			min_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_power_state_index = min_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.current_power_state_index == min_power_state_index) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_downclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = 0; i < rdev->pm.num_power_states; i++) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i >= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else {
					if (rdev->pm.current_power_state_index == 0)
						rdev->pm.requested_power_state_index =
							rdev->pm.num_power_states - 1;
					else
						rdev->pm.requested_power_state_index =
							rdev->pm.current_power_state_index - 1;
				}
			}
			rdev->pm.requested_clock_mode_index = 0;
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_power_state_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_upclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i <= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else
					rdev->pm.requested_power_state_index =
						rdev->pm.current_power_state_index + 1;
			}
			rdev->pm.requested_clock_mode_index = 0;
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for undefined action\n");
			return;
		}
	} else {
		/* for now just select the first power state and switch between clock modes */
		/* power state array is low to high, default is first (0) */
		if (rdev->pm.active_crtc_count > 1) {
			rdev->pm.requested_power_state_index = -1;
			/* start at 1 as we don't want the default mode */
			for (i = 1; i < rdev->pm.num_power_states; i++) {
				if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
					continue;
				else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
					 (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
					rdev->pm.requested_power_state_index = i;
					break;
				}
			}
			/* if nothing selected, grab the default state. */
			if (rdev->pm.requested_power_state_index == -1)
				rdev->pm.requested_power_state_index = 0;
		} else
			rdev->pm.requested_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index == 0) {
					rdev->pm.requested_clock_mode_index = 0;
					rdev->pm.dynpm_can_downclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index - 1;
			} else {
				rdev->pm.requested_clock_mode_index = 0;
				rdev->pm.dynpm_can_downclock = false;
			}
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_clock_mode_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index ==
				    (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
					rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
					rdev->pm.dynpm_can_upclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index + 1;
			} else {
				rdev->pm.requested_clock_mode_index =
					rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
				rdev->pm.dynpm_can_upclock = false;
			}
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for undefined action\n");
			return;
		}
	}

	DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
		  clock_info[rdev->pm.requested_clock_mode_index].sclk,
		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
		  clock_info[rdev->pm.requested_clock_mode_index].mclk,
		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
		  pcie_lanes);
}

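/* Return the index of the 'instance'th power state of the given type. */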
static int r600_pm_get_type_index(struct radeon_device *rdev,
				  enum radeon_pm_state_type ps_type,
				  int instance)
{
	int i;
	int found_instance = -1;

	for (i = 0; i < rdev->pm.num_power_states; i++) {
		if (rdev->pm.power_state[i].type == ps_type) {
			found_instance++;
			if (found_instance == instance)
				return i;
		}
	}
	/* return default if no match */
	return rdev->pm.default_power_state_index;
}

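/*
 * Fill in the dpms on/off power state and clock mode indices for each pm
 * profile (default, low/mid/high single-head, low/mid/high multi-head),
 * based on how many power states the ASIC exposes.
 */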
void rs780_pm_init_profile(struct radeon_device *rdev)
{
	if (rdev->pm.num_power_states == 2) {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else if (rdev->pm.num_power_states == 3) {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	}
}

void r600_pm_init_profile(struct radeon_device *rdev)
{
	if (rdev->family == CHIP_R600) {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else {
		if (rdev->pm.num_power_states < 4) {
			/* default */
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
			/* low sh */
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			/* mid sh */
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			/* high sh */
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
			/* low mh */
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			/* mid mh */
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			/* high mh */
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
		} else {
			/* default */
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
			/* low sh */
			if (rdev->flags & RADEON_IS_MOBILITY) {
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			} else {
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			}
			/* mid sh */
			if (rdev->flags & RADEON_IS_MOBILITY) {
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			} else {
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			}
			/* high sh */
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx =
				r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx =
				r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
			/* low mh */
			if (rdev->flags & RADEON_IS_MOBILITY) {
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			} else {
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			}
			/* mid mh */
			if (rdev->flags & RADEON_IS_MOBILITY) {
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			} else {
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			}
			/* high mh */
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx =
				r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx =
				r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
		}
	}
}

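/* Apply the voltage for the requested power state/clock mode, if it changed. */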
void r600_pm_misc(struct radeon_device *rdev)
{
	int req_ps_idx = rdev->pm.requested_power_state_index;
	int req_cm_idx = rdev->pm.requested_clock_mode_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;

	if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
		if (voltage->voltage != rdev->pm.current_vddc) {
			radeon_atom_set_voltage(rdev, voltage->voltage);
			rdev->pm.current_vddc = voltage->voltage;
			DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage);
		}
	}
}

bool r600_gui_idle(struct radeon_device *rdev)
{
	if (RREG32(GRBM_STATUS) & GUI_ACTIVE)
		return false;
	else
		return true;
}

/* hpd for digital panel detect/disconnect */
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_4:
			if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
			/* DCE 3.2 */
		case RADEON_HPD_5:
			if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_6:
			if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	}
	return connected;
}

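/*
 * Program the HPD interrupt polarity so the next interrupt fires on the
 * opposite transition: look for disconnect while connected and vice versa.
 */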
void r600_hpd_set_polarity(struct radeon_device *rdev,
			   enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = r600_hpd_sense(rdev, hpd);

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_4:
			tmp = RREG32(DC_HPD4_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD4_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_5:
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			break;
			/* DCE 3.2 */
		case RADEON_HPD_6:
			tmp = RREG32(DC_HPD6_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	}
}

void r600_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	if (ASIC_IS_DCE3(rdev)) {
		u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
		if (ASIC_IS_DCE32(rdev))
			tmp |= DC_HPDx_EN;

		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, tmp);
				rdev->irq.hpd[0] = true;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, tmp);
				rdev->irq.hpd[1] = true;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, tmp);
				rdev->irq.hpd[2] = true;
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, tmp);
				rdev->irq.hpd[3] = true;
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, tmp);
				rdev->irq.hpd[4] = true;
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, tmp);
				rdev->irq.hpd[5] = true;
				break;
			default:
				break;
			}
		}
	} else {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[0] = true;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[1] = true;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[2] = true;
				break;
			default:
				break;
			}
		}
	}
	if (rdev->irq.installed)
		r600_irq_set(rdev);
}

void r600_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	if (ASIC_IS_DCE3(rdev)) {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, 0);
				rdev->irq.hpd[0] = false;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, 0);
				rdev->irq.hpd[1] = false;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, 0);
				rdev->irq.hpd[2] = false;
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, 0);
				rdev->irq.hpd[3] = false;
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, 0);
				rdev->irq.hpd[4] = false;
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, 0);
				rdev->irq.hpd[5] = false;
				break;
			default:
				break;
			}
		}
	} else {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
				rdev->irq.hpd[0] = false;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
				rdev->irq.hpd[1] = false;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
				rdev->irq.hpd[2] = false;
				break;
			default:
				break;
			}
		}
	}
}

/*
 * R600 PCIE GART
 */
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	/* flush hdp cache so updates hit vram */
	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
	    !(rdev->flags & RADEON_IS_AGP)) {
		void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
		u32 tmp;

		/* r7xx hw bug: write to HDP_DEBUG1 followed by an fb read
		 * rather than a write to HDP_REG_COHERENCY_FLUSH_CNTL.
		 * The flush-cntl write seems to cause problems on some AGP
		 * cards, so just use the old method for them.
		 */
		WREG32(HDP_DEBUG1, 0);
		tmp = readl((void __iomem *)ptr);
	} else
		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		if (tmp == 2) {
			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
			return;
		}
		if (tmp) {
			return;
		}
		udelay(1);
	}
}

int r600_pcie_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.table.vram.robj) {
		WARN(1, "R600 PCIE GART already initialized.\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
	return radeon_gart_table_vram_alloc(rdev);
}

int r600_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.table.vram.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	for (i = 1; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	r600_pcie_gart_tlb_flush(rdev);
	rdev->gart.ready = true;
	return 0;
}

void r600_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int i, r;

	/* Disable all tables */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	/* Disable L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup L1 TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	if (rdev->gart.table.vram.robj) {
		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->gart.table.vram.robj);
			radeon_bo_unpin(rdev->gart.table.vram.robj);
			radeon_bo_unreserve(rdev->gart.table.vram.robj);
		}
	}
}

void r600_pcie_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	r600_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}

void r600_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
}

int r600_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}

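/*
 * Program the memory controller apertures (system, FB, AGP) while the MC
 * is idle and display access is stopped, then restore display access.
 */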
static void r600_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Lockout access through VGA aperture (doesn't exist before R600) */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	rv515_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it from overwriting our objects */
	rv515_vga_render_disable(rdev);
}

/**
 * r600_vram_gtt_location - try to find VRAM & GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Try to place VRAM at the same address as it appears in the CPU (PCI)
 * address space, as some GPUs seem to have issues when we reprogram it
 * to a different address space.
 *
 * If there is not enough space to fit the invisible VRAM after the
 * aperture, then we limit the VRAM size to the aperture.
 *
 * If we are using AGP, place VRAM adjacent to the AGP aperture, as we need
 * them to be contiguous from the GPU's point of view so that we can program
 * the GPU to catch accesses outside of them (weird GPU policy, see ??).
 *
 * This function never fails; in the worst case we limit VRAM or GTT.
 *
 * Note: GTT start, end, and size should be initialized before calling this
 * function on AGP platforms.
 */
void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_bf, size_af;

	if (mc->mc_vram_size > 0xE0000000) {
		/* leave room for at least 512M GTT */
		dev_warn(rdev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xE0000000;
		mc->mc_vram_size = 0xE0000000;
	}
	if (rdev->flags & RADEON_IS_AGP) {
		size_bf = mc->gtt_start;
		size_af = 0xFFFFFFFF - mc->gtt_end + 1;
		if (size_bf > size_af) {
			if (mc->mc_vram_size > size_bf) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_bf;
				mc->mc_vram_size = size_bf;
			}
			mc->vram_start = mc->gtt_start - mc->mc_vram_size;
		} else {
			if (mc->mc_vram_size > size_af) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_af;
				mc->mc_vram_size = size_af;
			}
			mc->vram_start = mc->gtt_end;
		}
		mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
		dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
				mc->mc_vram_size >> 20, mc->vram_start,
				mc->vram_end, mc->real_vram_size >> 20);
	} else {
		u64 base = 0;
		if (rdev->flags & RADEON_IS_IGP) {
			base = RREG32(MC_VM_FB_LOCATION) & 0xFFFF;
			base <<= 24;
		}
		radeon_vram_location(rdev, &rdev->mc, base);
		rdev->mc.gtt_base_align = 0;
		radeon_gtt_location(rdev, mc);
	}
}

int r600_mc_init(struct radeon_device *rdev)
{
	u32 tmp;
	int chansize, numchan;

	/* Get VRAM information */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(RAMCFG);
	if (tmp & CHANSIZE_OVERRIDE) {
		chansize = 16;
	} else if (tmp & CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(CHMAP);
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	}
	rdev->mc.vram_width = numchan * chansize;
	/* Could the aperture size report 0? */
	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
	/* Setup GPU memory space */
	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
	r600_vram_gtt_location(rdev, &rdev->mc);

	if (rdev->flags & RADEON_IS_IGP) {
		rs690_pm_info(rdev);
		rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
	}
	radeon_update_bandwidth_info(rdev);
	return 0;
}

/* We don't check whether the GPU really needs a reset; we simply do the
 * reset.  It's up to the caller to determine if the GPU needs one.  We
 * might add a helper function to check that.
 */
int r600_gpu_soft_reset(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
				S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
				S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
				S_008010_SH_BUSY(1) | S_008010_SPI03_BUSY(1) |
				S_008010_SMX_BUSY(1) | S_008010_SC_BUSY(1) |
				S_008010_PA_BUSY(1) | S_008010_DB03_BUSY(1) |
				S_008010_CR_BUSY(1) | S_008010_CB03_BUSY(1) |
				S_008010_GUI_ACTIVE(1);
	u32 grbm2_busy_mask = S_008014_SPI0_BUSY(1) | S_008014_SPI1_BUSY(1) |
			S_008014_SPI2_BUSY(1) | S_008014_SPI3_BUSY(1) |
			S_008014_TA0_BUSY(1) | S_008014_TA1_BUSY(1) |
			S_008014_TA2_BUSY(1) | S_008014_TA3_BUSY(1) |
			S_008014_DB0_BUSY(1) | S_008014_DB1_BUSY(1) |
			S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
			S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
			S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
	u32 tmp;

	dev_info(rdev->dev, "GPU softreset\n");
	dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
		RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
		RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
		RREG32(R_000E50_SRBM_STATUS));
	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Disable CP parsing/prefetching */
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
	/* Check if any of the rendering blocks is busy and reset it */
	if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
	    (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
		tmp = S_008020_SOFT_RESET_CR(1) |
			S_008020_SOFT_RESET_DB(1) |
			S_008020_SOFT_RESET_CB(1) |
			S_008020_SOFT_RESET_PA(1) |
			S_008020_SOFT_RESET_SC(1) |
			S_008020_SOFT_RESET_SMX(1) |
			S_008020_SOFT_RESET_SPI(1) |
			S_008020_SOFT_RESET_SX(1) |
			S_008020_SOFT_RESET_SH(1) |
			S_008020_SOFT_RESET_TC(1) |
			S_008020_SOFT_RESET_TA(1) |
			S_008020_SOFT_RESET_VC(1) |
			S_008020_SOFT_RESET_VGT(1);
		dev_info(rdev->dev, "  R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
		RREG32(R_008020_GRBM_SOFT_RESET);
		mdelay(15);
		WREG32(R_008020_GRBM_SOFT_RESET, 0);
	}
	/* Reset CP (we always reset CP) */
	tmp = S_008020_SOFT_RESET_CP(1);
	dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
	WREG32(R_008020_GRBM_SOFT_RESET, tmp);
	RREG32(R_008020_GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(R_008020_GRBM_SOFT_RESET, 0);
	/* Wait a little for things to settle down */
	mdelay(1);
	dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
		RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
		RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
		RREG32(R_000E50_SRBM_STATUS));
	rv515_mc_resume(rdev, &save);
	return 0;
}

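/*
 * Check whether the GPU is hung: if the GUI engine is idle it cannot be
 * locked up; otherwise nudge the CP with NOPs and let the generic lockup
 * tracker decide based on ring-pointer progress.
 */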
bool r600_gpu_is_lockup(struct radeon_device *rdev)
{
	u32 srbm_status;
	u32 grbm_status;
	u32 grbm_status2;
	int r;

	srbm_status = RREG32(R_000E50_SRBM_STATUS);
	grbm_status = RREG32(R_008010_GRBM_STATUS);
	grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
	if (!G_008010_GUI_ACTIVE(grbm_status)) {
		r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
		return false;
	}
	/* force CP activities */
	r = radeon_ring_lock(rdev, 2);
	if (!r) {
		/* PACKET2 NOP */
		radeon_ring_write(rdev, 0x80000000);
		radeon_ring_write(rdev, 0x80000000);
		radeon_ring_unlock_commit(rdev);
	}
	rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
	return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);
}

int r600_asic_reset(struct radeon_device *rdev)
{
	return r600_gpu_soft_reset(rdev);
}

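/*
 * Build the pipe-to-backend map: distribute the enabled render backends
 * across the tile pipes in swizzled round-robin order, two bits per pipe.
 */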
static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
					     u32 num_backends,
					     u32 backend_disable_mask)
{
	u32 backend_map = 0;
	u32 enabled_backends_mask;
	u32 enabled_backends_count;
	u32 cur_pipe;
	u32 swizzle_pipe[R6XX_MAX_PIPES];
	u32 cur_backend;
	u32 i;

	if (num_tile_pipes > R6XX_MAX_PIPES)
		num_tile_pipes = R6XX_MAX_PIPES;
	if (num_tile_pipes < 1)
		num_tile_pipes = 1;
	if (num_backends > R6XX_MAX_BACKENDS)
		num_backends = R6XX_MAX_BACKENDS;
	if (num_backends < 1)
		num_backends = 1;

	enabled_backends_mask = 0;
	enabled_backends_count = 0;
	for (i = 0; i < R6XX_MAX_BACKENDS; ++i) {
		if (((backend_disable_mask >> i) & 1) == 0) {
			enabled_backends_mask |= (1 << i);
			++enabled_backends_count;
		}
		if (enabled_backends_count == num_backends)
			break;
	}

	if (enabled_backends_count == 0) {
		enabled_backends_mask = 1;
		enabled_backends_count = 1;
	}

	if (enabled_backends_count != num_backends)
		num_backends = enabled_backends_count;

	memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R6XX_MAX_PIPES);
	switch (num_tile_pipes) {
	case 1:
		swizzle_pipe[0] = 0;
		break;
	case 2:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		break;
	case 3:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		swizzle_pipe[2] = 2;
		break;
	case 4:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		swizzle_pipe[2] = 2;
		swizzle_pipe[3] = 3;
		break;
	case 5:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		swizzle_pipe[2] = 2;
		swizzle_pipe[3] = 3;
		swizzle_pipe[4] = 4;
		break;
	case 6:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 2;
		swizzle_pipe[2] = 4;
		swizzle_pipe[3] = 5;
		swizzle_pipe[4] = 1;
		swizzle_pipe[5] = 3;
		break;
	case 7:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 2;
		swizzle_pipe[2] = 4;
		swizzle_pipe[3] = 6;
		swizzle_pipe[4] = 1;
		swizzle_pipe[5] = 3;
		swizzle_pipe[6] = 5;
		break;
	case 8:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 2;
		swizzle_pipe[2] = 4;
		swizzle_pipe[3] = 6;
		swizzle_pipe[4] = 1;
		swizzle_pipe[5] = 3;
		swizzle_pipe[6] = 5;
		swizzle_pipe[7] = 7;
		break;
	}

	cur_backend = 0;
	for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
		while (((1 << cur_backend) & enabled_backends_mask) == 0)
			cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;

		backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));

		cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
	}

	return backend_map;
}

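/* Population count: number of set bits in @val (enabled pipes/backends). */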
int r600_count_pipe_bits(uint32_t val)
{
	int i, ret = 0;

	for (i = 0; i < 32; i++) {
		ret += val & 1;
		val >>= 1;
	}
	return ret;
}

void r600_gpu_init(struct radeon_device *rdev)
{
	u32 tiling_config;
	u32 ramcfg;
	u32 backend_map;
	u32 cc_rb_backend_disable;
	u32 cc_gc_shader_pipe_config;
	u32 tmp;
	int i, j;
	u32 sq_config;
	u32 sq_gpr_resource_mgmt_1 = 0;
	u32 sq_gpr_resource_mgmt_2 = 0;
	u32 sq_thread_resource_mgmt = 0;
	u32 sq_stack_resource_mgmt_1 = 0;
	u32 sq_stack_resource_mgmt_2 = 0;

	switch (rdev->family) {
	case CHIP_R600:
		rdev->config.r600.max_pipes = 4;
		rdev->config.r600.max_tile_pipes = 8;
		rdev->config.r600.max_simds = 4;
		rdev->config.r600.max_backends = 4;
		rdev->config.r600.max_gprs = 256;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 256;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 16;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		rdev->config.r600.max_pipes = 2;
		rdev->config.r600.max_tile_pipes = 2;
		rdev->config.r600.max_simds = 3;
		rdev->config.r600.max_backends = 1;
		rdev->config.r600.max_gprs = 128;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 128;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 4;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		rdev->config.r600.max_pipes = 1;
		rdev->config.r600.max_tile_pipes = 1;
		rdev->config.r600.max_simds = 2;
		rdev->config.r600.max_backends = 1;
		rdev->config.r600.max_gprs = 128;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 128;
		rdev->config.r600.max_hw_contexts = 4;
		rdev->config.r600.max_gs_threads = 4;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 1;
		break;
	case CHIP_RV670:
		rdev->config.r600.max_pipes = 4;
		rdev->config.r600.max_tile_pipes = 4;
		rdev->config.r600.max_simds = 4;
		rdev->config.r600.max_backends = 4;
		rdev->config.r600.max_gprs = 192;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 256;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 16;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	default:
		break;
	}

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

	/* Setup tiling */
	tiling_config = 0;
	ramcfg = RREG32(RAMCFG);
	switch (rdev->config.r600.max_tile_pipes) {
	case 1:
		tiling_config |= PIPE_TILING(0);
		break;
	case 2:
		tiling_config |= PIPE_TILING(1);
		break;
	case 4:
		tiling_config |= PIPE_TILING(2);
		break;
	case 8:
		tiling_config |= PIPE_TILING(3);
		break;
	default:
		break;
	}
	rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
1605	rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
1606	tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
1607	tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
1608	if ((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT)
1609		rdev->config.r600.tiling_group_size = 512;
1610	else
1611		rdev->config.r600.tiling_group_size = 256;
1612	tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
1613	if (tmp > 3) {
1614		tiling_config |= ROW_TILING(3);
1615		tiling_config |= SAMPLE_SPLIT(3);
1616	} else {
1617		tiling_config |= ROW_TILING(tmp);
1618		tiling_config |= SAMPLE_SPLIT(tmp);
1619	}
1620	tiling_config |= BANK_SWAPS(1);
1621
1622	cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
1623	cc_rb_backend_disable |=
1624		BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);
1625
1626	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
1627	cc_gc_shader_pipe_config |=
1628		INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
1629	cc_gc_shader_pipe_config |=
1630		INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);
1631
1632	backend_map = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
1633							(R6XX_MAX_BACKENDS -
1634							 r600_count_pipe_bits((cc_rb_backend_disable &
1635									       R6XX_MAX_BACKENDS_MASK) >> 16)),
1636							(cc_rb_backend_disable >> 16));
1637	rdev->config.r600.tile_config = tiling_config;
1638	tiling_config |= BACKEND_MAP(backend_map);
1639	WREG32(GB_TILING_CONFIG, tiling_config);
1640	WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
1641	WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
1642
1643	/* Setup pipes */
1644	WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
1645	WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
1646	WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
1647
1648	tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
1649	WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
1650	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);
1651
1652	/* Setup some CP states */
1653	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
1654	WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40)));
1655
1656	WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT |
1657			     SYNC_WALKER | SYNC_ALIGNER));
1658	/* Setup various GPU states */
1659	if (rdev->family == CHIP_RV670)
1660		WREG32(ARB_GDEC_RD_CNTL, 0x00000021);
1661
1662	tmp = RREG32(SX_DEBUG_1);
1663	tmp |= SMX_EVENT_RELEASE;
1664	if ((rdev->family > CHIP_R600))
1665		tmp |= ENABLE_NEW_SMX_ADDRESS;
1666	WREG32(SX_DEBUG_1, tmp);
1667
1668	if (((rdev->family) == CHIP_R600) ||
1669	    ((rdev->family) == CHIP_RV630) ||
1670	    ((rdev->family) == CHIP_RV610) ||
1671	    ((rdev->family) == CHIP_RV620) ||
1672	    ((rdev->family) == CHIP_RS780) ||
1673	    ((rdev->family) == CHIP_RS880)) {
1674		WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
1675	} else {
1676		WREG32(DB_DEBUG, 0);
1677	}
1678	WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
1679			       DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));
1680
1681	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
1682	WREG32(VGT_NUM_INSTANCES, 0);
1683
1684	WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
1685	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0));
1686
1687	tmp = RREG32(SQ_MS_FIFO_SIZES);
1688	if (((rdev->family) == CHIP_RV610) ||
1689	    ((rdev->family) == CHIP_RV620) ||
1690	    ((rdev->family) == CHIP_RS780) ||
1691	    ((rdev->family) == CHIP_RS880)) {
1692		tmp = (CACHE_FIFO_SIZE(0xa) |
1693		       FETCH_FIFO_HIWATER(0xa) |
1694		       DONE_FIFO_HIWATER(0xe0) |
1695		       ALU_UPDATE_FIFO_HIWATER(0x8));
1696	} else if (((rdev->family) == CHIP_R600) ||
1697		   ((rdev->family) == CHIP_RV630)) {
1698		tmp &= ~DONE_FIFO_HIWATER(0xff);
1699		tmp |= DONE_FIFO_HIWATER(0x4);
1700	}
1701	WREG32(SQ_MS_FIFO_SIZES, tmp);
1702
1703	/* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
1704	 * should be adjusted as needed by the 2D/3D drivers.  This just sets default values
1705	 */
1706	sq_config = RREG32(SQ_CONFIG);
1707	sq_config &= ~(PS_PRIO(3) |
1708		       VS_PRIO(3) |
1709		       GS_PRIO(3) |
1710		       ES_PRIO(3));
1711	sq_config |= (DX9_CONSTS |
1712		      VC_ENABLE |
1713		      PS_PRIO(0) |
1714		      VS_PRIO(1) |
1715		      GS_PRIO(2) |
1716		      ES_PRIO(3));
1717
1718	if ((rdev->family) == CHIP_R600) {
1719		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
1720					  NUM_VS_GPRS(124) |
1721					  NUM_CLAUSE_TEMP_GPRS(4));
1722		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
1723					  NUM_ES_GPRS(0));
1724		sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
1725					   NUM_VS_THREADS(48) |
1726					   NUM_GS_THREADS(4) |
1727					   NUM_ES_THREADS(4));
1728		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
1729					    NUM_VS_STACK_ENTRIES(128));
1730		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
1731					    NUM_ES_STACK_ENTRIES(0));
1732	} else if (((rdev->family) == CHIP_RV610) ||
1733		   ((rdev->family) == CHIP_RV620) ||
1734		   ((rdev->family) == CHIP_RS780) ||
1735		   ((rdev->family) == CHIP_RS880)) {
1736		/* no vertex cache */
1737		sq_config &= ~VC_ENABLE;
1738
1739		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
1740					  NUM_VS_GPRS(44) |
1741					  NUM_CLAUSE_TEMP_GPRS(2));
1742		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
1743					  NUM_ES_GPRS(17));
1744		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
1745					   NUM_VS_THREADS(78) |
1746					   NUM_GS_THREADS(4) |
1747					   NUM_ES_THREADS(31));
1748		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
1749					    NUM_VS_STACK_ENTRIES(40));
1750		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
1751					    NUM_ES_STACK_ENTRIES(16));
1752	} else if (((rdev->family) == CHIP_RV630) ||
1753		   ((rdev->family) == CHIP_RV635)) {
1754		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
1755					  NUM_VS_GPRS(44) |
1756					  NUM_CLAUSE_TEMP_GPRS(2));
1757		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
1758					  NUM_ES_GPRS(18));
1759		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
1760					   NUM_VS_THREADS(78) |
1761					   NUM_GS_THREADS(4) |
1762					   NUM_ES_THREADS(31));
1763		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
1764					    NUM_VS_STACK_ENTRIES(40));
1765		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
1766					    NUM_ES_STACK_ENTRIES(16));
1767	} else if ((rdev->family) == CHIP_RV670) {
1768		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
1769					  NUM_VS_GPRS(44) |
1770					  NUM_CLAUSE_TEMP_GPRS(2));
1771		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
1772					  NUM_ES_GPRS(17));
1773		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
1774					   NUM_VS_THREADS(78) |
1775					   NUM_GS_THREADS(4) |
1776					   NUM_ES_THREADS(31));
1777		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
1778					    NUM_VS_STACK_ENTRIES(64));
1779		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
1780					    NUM_ES_STACK_ENTRIES(64));
1781	}
1782
1783	WREG32(SQ_CONFIG, sq_config);
1784	WREG32(SQ_GPR_RESOURCE_MGMT_1,  sq_gpr_resource_mgmt_1);
1785	WREG32(SQ_GPR_RESOURCE_MGMT_2,  sq_gpr_resource_mgmt_2);
1786	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
1787	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
1788	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
1789
1790	if (((rdev->family) == CHIP_RV610) ||
1791	    ((rdev->family) == CHIP_RV620) ||
1792	    ((rdev->family) == CHIP_RS780) ||
1793	    ((rdev->family) == CHIP_RS880)) {
1794		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
1795	} else {
1796		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
1797	}
1798
1799	/* More default values. 2D/3D driver should adjust as needed */
1800	WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
1801					 S1_X(0x4) | S1_Y(0xc)));
1802	WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
1803					 S1_X(0x2) | S1_Y(0x2) |
1804					 S2_X(0xa) | S2_Y(0x6) |
1805					 S3_X(0x6) | S3_Y(0xa)));
1806	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
1807					     S1_X(0x4) | S1_Y(0xc) |
1808					     S2_X(0x1) | S2_Y(0x6) |
1809					     S3_X(0xa) | S3_Y(0xe)));
1810	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
1811					     S5_X(0x0) | S5_Y(0x0) |
1812					     S6_X(0xb) | S6_Y(0x4) |
1813					     S7_X(0x7) | S7_Y(0x8)));
1814
1815	WREG32(VGT_STRMOUT_EN, 0);
1816	tmp = rdev->config.r600.max_pipes * 16;
1817	switch (rdev->family) {
1818	case CHIP_RV610:
1819	case CHIP_RV620:
1820	case CHIP_RS780:
1821	case CHIP_RS880:
1822		tmp += 32;
1823		break;
1824	case CHIP_RV670:
1825		tmp += 128;
1826		break;
1827	default:
1828		break;
1829	}
1830	if (tmp > 256) {
1831		tmp = 256;
1832	}
1833	WREG32(VGT_ES_PER_GS, 128);
1834	WREG32(VGT_GS_PER_ES, tmp);
1835	WREG32(VGT_GS_PER_VS, 2);
1836	WREG32(VGT_GS_VERTEX_REUSE, 16);
1837
1838	/* more default values. 2D/3D driver should adjust as needed */
1839	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
1840	WREG32(VGT_STRMOUT_EN, 0);
1841	WREG32(SX_MISC, 0);
1842	WREG32(PA_SC_MODE_CNTL, 0);
1843	WREG32(PA_SC_AA_CONFIG, 0);
1844	WREG32(PA_SC_LINE_STIPPLE, 0);
1845	WREG32(SPI_INPUT_Z, 0);
1846	WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
1847	WREG32(CB_COLOR7_FRAG, 0);
1848
1849	/* Clear render buffer base addresses */
1850	WREG32(CB_COLOR0_BASE, 0);
1851	WREG32(CB_COLOR1_BASE, 0);
1852	WREG32(CB_COLOR2_BASE, 0);
1853	WREG32(CB_COLOR3_BASE, 0);
1854	WREG32(CB_COLOR4_BASE, 0);
1855	WREG32(CB_COLOR5_BASE, 0);
1856	WREG32(CB_COLOR6_BASE, 0);
1857	WREG32(CB_COLOR7_BASE, 0);
1858	WREG32(CB_COLOR7_FRAG, 0);
1859
1860	switch (rdev->family) {
1861	case CHIP_RV610:
1862	case CHIP_RV620:
1863	case CHIP_RS780:
1864	case CHIP_RS880:
1865		tmp = TC_L2_SIZE(8);
1866		break;
1867	case CHIP_RV630:
1868	case CHIP_RV635:
1869		tmp = TC_L2_SIZE(4);
1870		break;
1871	case CHIP_R600:
1872		tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
1873		break;
1874	default:
1875		tmp = TC_L2_SIZE(0);
1876		break;
1877	}
1878	WREG32(TC_CNTL, tmp);
1879
1880	tmp = RREG32(HDP_HOST_PATH_CNTL);
1881	WREG32(HDP_HOST_PATH_CNTL, tmp);
1882
1883	tmp = RREG32(ARB_POP);
1884	tmp |= ENABLE_TC128;
1885	WREG32(ARB_POP, tmp);
1886
1887	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
1888	WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
1889			       NUM_CLIP_SEQ(3)));
1890	WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
1891}
1892
1893
1894/*
1895 * Indirect register accessors
1896 */
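/* PCIE port registers are reached through the PCIE_PORT_INDEX/PCIE_PORT_DATA
 * pair; the discarded index reads below make sure the posted index write has
 * landed before the data register is touched.
 */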
1897u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
1898{
1899	u32 r;
1900
1901	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
1902	(void)RREG32(PCIE_PORT_INDEX);
1903	r = RREG32(PCIE_PORT_DATA);
1904	return r;
1905}
1906
1907void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
1908{
1909	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
1910	(void)RREG32(PCIE_PORT_INDEX);
1911	WREG32(PCIE_PORT_DATA, (v));
1912	(void)RREG32(PCIE_PORT_DATA);
1913}
1914
1915/*
1916 * CP & Ring
1917 */
1918void r600_cp_stop(struct radeon_device *rdev)
1919{
1920	rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
1921	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
1922}
1923
1924int r600_init_microcode(struct radeon_device *rdev)
1925{
1926	struct platform_device *pdev;
1927	const char *chip_name;
1928	const char *rlc_chip_name;
1929	size_t pfp_req_size, me_req_size, rlc_req_size;
1930	char fw_name[30];
1931	int err;
1932
1933	DRM_DEBUG("\n");
1934
1935	pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
1936	err = IS_ERR(pdev);
1937	if (err) {
1938		printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
1939		return -EINVAL;
1940	}
1941
1942	switch (rdev->family) {
1943	case CHIP_R600:
1944		chip_name = "R600";
1945		rlc_chip_name = "R600";
1946		break;
1947	case CHIP_RV610:
1948		chip_name = "RV610";
1949		rlc_chip_name = "R600";
1950		break;
1951	case CHIP_RV630:
1952		chip_name = "RV630";
1953		rlc_chip_name = "R600";
1954		break;
1955	case CHIP_RV620:
1956		chip_name = "RV620";
1957		rlc_chip_name = "R600";
1958		break;
1959	case CHIP_RV635:
1960		chip_name = "RV635";
1961		rlc_chip_name = "R600";
1962		break;
1963	case CHIP_RV670:
1964		chip_name = "RV670";
1965		rlc_chip_name = "R600";
1966		break;
1967	case CHIP_RS780:
1968	case CHIP_RS880:
1969		chip_name = "RS780";
1970		rlc_chip_name = "R600";
1971		break;
1972	case CHIP_RV770:
1973		chip_name = "RV770";
1974		rlc_chip_name = "R700";
1975		break;
1976	case CHIP_RV730:
1977	case CHIP_RV740:
1978		chip_name = "RV730";
1979		rlc_chip_name = "R700";
1980		break;
1981	case CHIP_RV710:
1982		chip_name = "RV710";
1983		rlc_chip_name = "R700";
1984		break;
1985	case CHIP_CEDAR:
1986		chip_name = "CEDAR";
1987		rlc_chip_name = "CEDAR";
1988		break;
1989	case CHIP_REDWOOD:
1990		chip_name = "REDWOOD";
1991		rlc_chip_name = "REDWOOD";
1992		break;
1993	case CHIP_JUNIPER:
1994		chip_name = "JUNIPER";
1995		rlc_chip_name = "JUNIPER";
1996		break;
1997	case CHIP_CYPRESS:
1998	case CHIP_HEMLOCK:
1999		chip_name = "CYPRESS";
2000		rlc_chip_name = "CYPRESS";
2001		break;
2002	default: BUG();
2003	}
2004
2005	if (rdev->family >= CHIP_CEDAR) {
2006		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
2007		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
2008		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
2009	} else if (rdev->family >= CHIP_RV770) {
2010		pfp_req_size = R700_PFP_UCODE_SIZE * 4;
2011		me_req_size = R700_PM4_UCODE_SIZE * 4;
2012		rlc_req_size = R700_RLC_UCODE_SIZE * 4;
2013	} else {
2014		pfp_req_size = PFP_UCODE_SIZE * 4;
2015		me_req_size = PM4_UCODE_SIZE * 12;
2016		rlc_req_size = RLC_UCODE_SIZE * 4;
2017	}
2018
2019	DRM_INFO("Loading %s Microcode\n", chip_name);
2020
2021	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
2022	err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
2023	if (err)
2024		goto out;
2025	if (rdev->pfp_fw->size != pfp_req_size) {
2026		printk(KERN_ERR
2027		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
2028		       rdev->pfp_fw->size, fw_name);
2029		err = -EINVAL;
2030		goto out;
2031	}
2032
2033	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
2034	err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
2035	if (err)
2036		goto out;
2037	if (rdev->me_fw->size != me_req_size) {
2038		printk(KERN_ERR
2039		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
2040		       rdev->me_fw->size, fw_name);
2041		err = -EINVAL;
2042	}
2043
2044	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
2045	err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
2046	if (err)
2047		goto out;
2048	if (rdev->rlc_fw->size != rlc_req_size) {
2049		printk(KERN_ERR
2050		       "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
2051		       rdev->rlc_fw->size, fw_name);
2052		err = -EINVAL;
2053	}
2054
2055out:
2056	platform_device_unregister(pdev);
2057
2058	if (err) {
2059		if (err != -EINVAL)
2060			printk(KERN_ERR
2061			       "r600_cp: Failed to load firmware \"%s\"\n",
2062			       fw_name);
2063		release_firmware(rdev->pfp_fw);
2064		rdev->pfp_fw = NULL;
2065		release_firmware(rdev->me_fw);
2066		rdev->me_fw = NULL;
2067		release_firmware(rdev->rlc_fw);
2068		rdev->rlc_fw = NULL;
2069	}
2070	return err;
2071}
2072
2073static int r600_cp_load_microcode(struct radeon_device *rdev)
2074{
2075	const __be32 *fw_data;
2076	int i;
2077
2078	if (!rdev->me_fw || !rdev->pfp_fw)
2079		return -EINVAL;
2080
2081	r600_cp_stop(rdev);
2082
2083	WREG32(CP_RB_CNTL, RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
2084
2085	/* Reset cp */
2086	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
2087	RREG32(GRBM_SOFT_RESET);
2088	mdelay(15);
2089	WREG32(GRBM_SOFT_RESET, 0);
2090
2091	WREG32(CP_ME_RAM_WADDR, 0);
2092
2093	fw_data = (const __be32 *)rdev->me_fw->data;
2094	WREG32(CP_ME_RAM_WADDR, 0);
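	/* the ME image is PM4_UCODE_SIZE * 12 bytes, i.e. 3 dwords per entry */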
2095	for (i = 0; i < PM4_UCODE_SIZE * 3; i++)
2096		WREG32(CP_ME_RAM_DATA,
2097		       be32_to_cpup(fw_data++));
2098
2099	fw_data = (const __be32 *)rdev->pfp_fw->data;
2100	WREG32(CP_PFP_UCODE_ADDR, 0);
2101	for (i = 0; i < PFP_UCODE_SIZE; i++)
2102		WREG32(CP_PFP_UCODE_DATA,
2103		       be32_to_cpup(fw_data++));
2104
2105	WREG32(CP_PFP_UCODE_ADDR, 0);
2106	WREG32(CP_ME_RAM_WADDR, 0);
2107	WREG32(CP_ME_RAM_RADDR, 0);
2108	return 0;
2109}
2110
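/* Queue a ME_INITIALIZE packet with the context count for this family,
 * then write CP_ME_CNTL with the halt bit clear so the micro engine
 * starts running.
 */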
2111int r600_cp_start(struct radeon_device *rdev)
2112{
2113	int r;
2114	uint32_t cp_me;
2115
2116	r = radeon_ring_lock(rdev, 7);
2117	if (r) {
2118		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
2119		return r;
2120	}
2121	radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
2122	radeon_ring_write(rdev, 0x1);
2123	if (rdev->family >= CHIP_RV770) {
2124		radeon_ring_write(rdev, 0x0);
2125		radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
2126	} else {
2127		radeon_ring_write(rdev, 0x3);
2128		radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
2129	}
2130	radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
2131	radeon_ring_write(rdev, 0);
2132	radeon_ring_write(rdev, 0);
2133	radeon_ring_unlock_commit(rdev);
2134
2135	cp_me = 0xff;
2136	WREG32(R_0086D8_CP_ME_CNTL, cp_me);
2137	return 0;
2138}
2139
2140int r600_cp_resume(struct radeon_device *rdev)
2141{
2142	u32 tmp;
2143	u32 rb_bufsz;
2144	int r;
2145
2146	/* Reset cp */
2147	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
2148	RREG32(GRBM_SOFT_RESET);
2149	mdelay(15);
2150	WREG32(GRBM_SOFT_RESET, 0);
2151
2152	/* Set ring buffer size */
2153	rb_bufsz = drm_order(rdev->cp.ring_size / 8);
2154	tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
2155#ifdef __BIG_ENDIAN
2156	tmp |= BUF_SWAP_32BIT;
2157#endif
2158	WREG32(CP_RB_CNTL, tmp);
2159	WREG32(CP_SEM_WAIT_TIMER, 0x4);
2160
2161	/* Set the write pointer delay */
2162	WREG32(CP_RB_WPTR_DELAY, 0);
2163
2164	/* Initialize the ring buffer's read and write pointers */
2165	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
2166	WREG32(CP_RB_RPTR_WR, 0);
2167	WREG32(CP_RB_WPTR, 0);
2168	WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF);
2169	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr));
2170	mdelay(1);
2171	WREG32(CP_RB_CNTL, tmp);
2172
2173	WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
2174	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
2175
2176	rdev->cp.rptr = RREG32(CP_RB_RPTR);
2177	rdev->cp.wptr = RREG32(CP_RB_WPTR);
2178
2179	r600_cp_start(rdev);
2180	rdev->cp.ready = true;
2181	r = radeon_ring_test(rdev);
2182	if (r) {
2183		rdev->cp.ready = false;
2184		return r;
2185	}
2186	return 0;
2187}
2188
2189void r600_cp_commit(struct radeon_device *rdev)
2190{
2191	WREG32(CP_RB_WPTR, rdev->cp.wptr);
2192	(void)RREG32(CP_RB_WPTR);
2193}
2194
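/* Round the requested CP ring size up to a supported power of two and
 * record it; ring writes are padded to multiples of 16 dwords
 * (align_mask).
 */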
2195void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
2196{
2197	u32 rb_bufsz;
2198
2199	/* Align ring size */
2200	rb_bufsz = drm_order(ring_size / 8);
2201	ring_size = (1 << (rb_bufsz + 1)) * 4;
2202	rdev->cp.ring_size = ring_size;
2203	rdev->cp.align_mask = 16 - 1;
2204}
2205
2206void r600_cp_fini(struct radeon_device *rdev)
2207{
2208	r600_cp_stop(rdev);
2209	radeon_ring_fini(rdev);
2210}
2211
2212
2213/*
2214 * GPU scratch registers helpers function.
2215 */
2216void r600_scratch_init(struct radeon_device *rdev)
2217{
2218	int i;
2219
2220	rdev->scratch.num_reg = 7;
2221	for (i = 0; i < rdev->scratch.num_reg; i++) {
2222		rdev->scratch.free[i] = true;
2223		rdev->scratch.reg[i] = SCRATCH_REG0 + (i * 4);
2224	}
2225}
2226
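/* Basic CP sanity check: seed a scratch register with 0xCAFEDEAD, ask
 * the ring to write 0xDEADBEEF to it via SET_CONFIG_REG, and poll until
 * the new value shows up or we time out.
 */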
2227int r600_ring_test(struct radeon_device *rdev)
2228{
2229	uint32_t scratch;
2230	uint32_t tmp = 0;
2231	unsigned i;
2232	int r;
2233
2234	r = radeon_scratch_get(rdev, &scratch);
2235	if (r) {
2236		DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
2237		return r;
2238	}
2239	WREG32(scratch, 0xCAFEDEAD);
2240	r = radeon_ring_lock(rdev, 3);
2241	if (r) {
2242		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
2243		radeon_scratch_free(rdev, scratch);
2244		return r;
2245	}
2246	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2247	radeon_ring_write(rdev, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
2248	radeon_ring_write(rdev, 0xDEADBEEF);
2249	radeon_ring_unlock_commit(rdev);
2250	for (i = 0; i < rdev->usec_timeout; i++) {
2251		tmp = RREG32(scratch);
2252		if (tmp == 0xDEADBEEF)
2253			break;
2254		DRM_UDELAY(1);
2255	}
2256	if (i < rdev->usec_timeout) {
2257		DRM_INFO("ring test succeeded in %d usecs\n", i);
2258	} else {
2259		DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
2260			  scratch, tmp);
2261		r = -EINVAL;
2262	}
2263	radeon_scratch_free(rdev, scratch);
2264	return r;
2265}
2266
2267void r600_wb_disable(struct radeon_device *rdev)
2268{
2269	int r;
2270
2271	WREG32(SCRATCH_UMSK, 0);
2272	if (rdev->wb.wb_obj) {
2273		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
2274		if (unlikely(r != 0))
2275			return;
2276		radeon_bo_kunmap(rdev->wb.wb_obj);
2277		radeon_bo_unpin(rdev->wb.wb_obj);
2278		radeon_bo_unreserve(rdev->wb.wb_obj);
2279	}
2280}
2281
2282void r600_wb_fini(struct radeon_device *rdev)
2283{
2284	r600_wb_disable(rdev);
2285	if (rdev->wb.wb_obj) {
2286		radeon_bo_unref(&rdev->wb.wb_obj);
2287		rdev->wb.wb = NULL;
2288		rdev->wb.wb_obj = NULL;
2289	}
2290}
2291
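/* Set up the writeback buffer: allocate, pin and map a page in GTT on
 * first use, then point the scratch and CP read-pointer writeback
 * addresses at it and unmask the scratch registers.
 */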
2292int r600_wb_enable(struct radeon_device *rdev)
2293{
2294	int r;
2295
2296	if (rdev->wb.wb_obj == NULL) {
2297		r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
2298				RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
2299		if (r) {
2300			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
2301			return r;
2302		}
2303		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
2304		if (unlikely(r != 0)) {
2305			r600_wb_fini(rdev);
2306			return r;
2307		}
2308		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
2309				&rdev->wb.gpu_addr);
2310		if (r) {
2311			radeon_bo_unreserve(rdev->wb.wb_obj);
2312			dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
2313			r600_wb_fini(rdev);
2314			return r;
2315		}
2316		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
2317		radeon_bo_unreserve(rdev->wb.wb_obj);
2318		if (r) {
2319			dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
2320			r600_wb_fini(rdev);
2321			return r;
2322		}
2323	}
2324	WREG32(SCRATCH_ADDR, (rdev->wb.gpu_addr >> 8) & 0xFFFFFFFF);
2325	WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + 1024) & 0xFFFFFFFC);
2326	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + 1024) & 0xFF);
2327	WREG32(SCRATCH_UMSK, 0xff);
2328	return 0;
2329}
2330
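/* Emit a fence: flush and invalidate caches, wait for the 3D engine to
 * go idle and clean, write the fence sequence number to the fence
 * scratch register and raise a ring buffer interrupt so the host
 * notices.
 */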
2331void r600_fence_ring_emit(struct radeon_device *rdev,
2332			  struct radeon_fence *fence)
2333{
2334	/* Also consider EVENT_WRITE_EOP; it handles the interrupts + timestamps + events */
2335
2336	radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
2337	radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT);
2338	/* wait for 3D idle clean */
2339	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2340	radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2341	radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
2342	/* Emit fence sequence & fire IRQ */
2343	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2344	radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
2345	radeon_ring_write(rdev, fence->seq);
2346	/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
2347	radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
2348	radeon_ring_write(rdev, RB_INT_STAT);
2349}
2350
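/* Copy num_pages pages from src_offset to dst_offset using the blit
 * shader; serialized by r600_blit.mutex.
 */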
2351int r600_copy_blit(struct radeon_device *rdev,
2352		   uint64_t src_offset, uint64_t dst_offset,
2353		   unsigned num_pages, struct radeon_fence *fence)
2354{
2355	int r;
2356
2357	mutex_lock(&rdev->r600_blit.mutex);
2358	rdev->r600_blit.vb_ib = NULL;
2359	r = r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
2360	if (r) {
2361		if (rdev->r600_blit.vb_ib)
2362			radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
2363		mutex_unlock(&rdev->r600_blit.mutex);
2364		return r;
2365	}
2366	r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
2367	r600_blit_done_copy(rdev, fence);
2368	mutex_unlock(&rdev->r600_blit.mutex);
2369	return 0;
2370}
2371
2372int r600_set_surface_reg(struct radeon_device *rdev, int reg,
2373			 uint32_t tiling_flags, uint32_t pitch,
2374			 uint32_t offset, uint32_t obj_size)
2375{
2376	return 0;
2377}
2378
2379void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
2380{
2381}
2382
2383
2384bool r600_card_posted(struct radeon_device *rdev)
2385{
2386	uint32_t reg;
2387
2388	/* first check CRTCs */
2389	reg = RREG32(D1CRTC_CONTROL) |
2390		RREG32(D2CRTC_CONTROL);
2391	if (reg & CRTC_EN)
2392		return true;
2393
2394	/* then check MEM_SIZE, in case the crtcs are off */
2395	if (RREG32(CONFIG_MEMSIZE))
2396		return true;
2397
2398	return false;
2399}
2400
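/* Bring the GPU up: load microcode if needed, program the memory
 * controller, enable AGP or the PCIE GART, initialize the core GPU
 * state, the blitter, interrupts, the CP ring and writeback.
 */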
2401int r600_startup(struct radeon_device *rdev)
2402{
2403	int r;
2404
2405	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
2406		r = r600_init_microcode(rdev);
2407		if (r) {
2408			DRM_ERROR("Failed to load firmware!\n");
2409			return r;
2410		}
2411	}
2412
2413	r600_mc_program(rdev);
2414	if (rdev->flags & RADEON_IS_AGP) {
2415		r600_agp_enable(rdev);
2416	} else {
2417		r = r600_pcie_gart_enable(rdev);
2418		if (r)
2419			return r;
2420	}
2421	r600_gpu_init(rdev);
2422	r = r600_blit_init(rdev);
2423	if (r) {
2424		r600_blit_fini(rdev);
2425		rdev->asic->copy = NULL;
2426		dev_warn(rdev->dev, "failed blitter (%d), falling back to memcpy\n", r);
2427	}
2428	/* pin copy shader into vram */
2429	if (rdev->r600_blit.shader_obj) {
2430		r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
2431		if (unlikely(r != 0))
2432			return r;
2433		r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
2434				&rdev->r600_blit.shader_gpu_addr);
2435		radeon_bo_unreserve(rdev->r600_blit.shader_obj);
2436		if (r) {
2437			dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
2438			return r;
2439		}
2440	}
2441	/* Enable IRQ */
2442	r = r600_irq_init(rdev);
2443	if (r) {
2444		DRM_ERROR("radeon: IH init failed (%d).\n", r);
2445		radeon_irq_kms_fini(rdev);
2446		return r;
2447	}
2448	r600_irq_set(rdev);
2449
2450	r = radeon_ring_init(rdev, rdev->cp.ring_size);
2451	if (r)
2452		return r;
2453	r = r600_cp_load_microcode(rdev);
2454	if (r)
2455		return r;
2456	r = r600_cp_resume(rdev);
2457	if (r)
2458		return r;
2459	/* write back buffers are not vital, so don't worry about failure */
2460	r600_wb_enable(rdev);
2461	return 0;
2462}
2463
2464void r600_vga_set_state(struct radeon_device *rdev, bool state)
2465{
2466	uint32_t temp;
2467
2468	temp = RREG32(CONFIG_CNTL);
2469	if (state == false) {
2470		temp &= ~(1<<0);
2471		temp |= (1<<1);
2472	} else {
2473		temp &= ~(1<<1);
2474	}
2475	WREG32(CONFIG_CNTL, temp);
2476}
2477
2478int r600_resume(struct radeon_device *rdev)
2479{
2480	int r;
2481
2482	/* Do not reset the GPU before posting; on r600 hw, unlike r500 hw,
2483	 * posting will perform the necessary tasks to bring the GPU back
2484	 * into good shape.
2485	 */
2486	/* post card */
2487	atom_asic_init(rdev->mode_info.atom_context);
2488
2489	r = r600_startup(rdev);
2490	if (r) {
2491		DRM_ERROR("r600 startup failed on resume\n");
2492		return r;
2493	}
2494
2495	r = r600_ib_test(rdev);
2496	if (r) {
2497		DRM_ERROR("radeon: failed testing IB (%d).\n", r);
2498		return r;
2499	}
2500
2501	r = r600_audio_init(rdev);
2502	if (r) {
2503		DRM_ERROR("radeon: audio resume failed\n");
2504		return r;
2505	}
2506
2507	return r;
2508}
2509
2510int r600_suspend(struct radeon_device *rdev)
2511{
2512	int r;
2513
2514	r600_audio_fini(rdev);
2515	r600_cp_stop(rdev);
2516	rdev->cp.ready = false;
2517	r600_irq_suspend(rdev);
2518	r600_wb_disable(rdev);
2519	r600_pcie_gart_disable(rdev);
2520	/* unpin the blit shader bo */
2521	if (rdev->r600_blit.shader_obj) {
2522		r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
2523		if (!r) {
2524			radeon_bo_unpin(rdev->r600_blit.shader_obj);
2525			radeon_bo_unreserve(rdev->r600_blit.shader_obj);
2526		}
2527	}
2528	return 0;
2529}
2530
2531/* Plan is to move initialization into this function and use
2532 * helper functions so that radeon_device_init pretty much
2533 * does nothing more than call asic specific functions. This
2534 * should also allow us to remove a bunch of callback functions
2535 * like vram_info.
2536 */
2537int r600_init(struct radeon_device *rdev)
2538{
2539	int r;
2540
2541	r = radeon_dummy_page_init(rdev);
2542	if (r)
2543		return r;
2544	if (r600_debugfs_mc_info_init(rdev)) {
2545		DRM_ERROR("Failed to register debugfs file for mc!\n");
2546	}
2547	/* This doesn't do much */
2548	r = radeon_gem_init(rdev);
2549	if (r)
2550		return r;
2551	/* Read BIOS */
2552	if (!radeon_get_bios(rdev)) {
2553		if (ASIC_IS_AVIVO(rdev))
2554			return -EINVAL;
2555	}
2556	/* Must be an ATOMBIOS */
2557	if (!rdev->is_atom_bios) {
2558		dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
2559		return -EINVAL;
2560	}
2561	r = radeon_atombios_init(rdev);
2562	if (r)
2563		return r;
2564	/* Post card if necessary */
2565	if (!r600_card_posted(rdev)) {
2566		if (!rdev->bios) {
2567			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
2568			return -EINVAL;
2569		}
2570		DRM_INFO("GPU not posted. posting now...\n");
2571		atom_asic_init(rdev->mode_info.atom_context);
2572	}
2573	/* Initialize scratch registers */
2574	r600_scratch_init(rdev);
2575	/* Initialize surface registers */
2576	radeon_surface_init(rdev);
2577	/* Initialize clocks */
2578	radeon_get_clock_info(rdev->ddev);
2579	/* Fence driver */
2580	r = radeon_fence_driver_init(rdev);
2581	if (r)
2582		return r;
2583	if (rdev->flags & RADEON_IS_AGP) {
2584		r = radeon_agp_init(rdev);
2585		if (r)
2586			radeon_agp_disable(rdev);
2587	}
2588	r = r600_mc_init(rdev);
2589	if (r)
2590		return r;
2591	/* Memory manager */
2592	r = radeon_bo_init(rdev);
2593	if (r)
2594		return r;
2595
2596	r = radeon_irq_kms_init(rdev);
2597	if (r)
2598		return r;
2599
2600	rdev->cp.ring_obj = NULL;
2601	r600_ring_init(rdev, 1024 * 1024);
2602
2603	rdev->ih.ring_obj = NULL;
2604	r600_ih_ring_init(rdev, 64 * 1024);
2605
2606	r = r600_pcie_gart_init(rdev);
2607	if (r)
2608		return r;
2609
2610	rdev->accel_working = true;
2611	r = r600_startup(rdev);
2612	if (r) {
2613		dev_err(rdev->dev, "disabling GPU acceleration\n");
2614		r600_cp_fini(rdev);
2615		r600_wb_fini(rdev);
2616		r600_irq_fini(rdev);
2617		radeon_irq_kms_fini(rdev);
2618		r600_pcie_gart_fini(rdev);
2619		rdev->accel_working = false;
2620	}
2621	if (rdev->accel_working) {
2622		r = radeon_ib_pool_init(rdev);
2623		if (r) {
2624			dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
2625			rdev->accel_working = false;
2626		} else {
2627			r = r600_ib_test(rdev);
2628			if (r) {
2629				dev_err(rdev->dev, "IB test failed (%d).\n", r);
2630				rdev->accel_working = false;
2631			}
2632		}
2633	}
2634
2635	r = r600_audio_init(rdev);
2636	if (r)
2637		return r; /* TODO error handling */
2638	return 0;
2639}
2640
2641void r600_fini(struct radeon_device *rdev)
2642{
2643	r600_audio_fini(rdev);
2644	r600_blit_fini(rdev);
2645	r600_cp_fini(rdev);
2646	r600_wb_fini(rdev);
2647	r600_irq_fini(rdev);
2648	radeon_irq_kms_fini(rdev);
2649	r600_pcie_gart_fini(rdev);
2650	radeon_agp_fini(rdev);
2651	radeon_gem_fini(rdev);
2652	radeon_fence_driver_fini(rdev);
2653	radeon_bo_fini(rdev);
2654	radeon_atombios_fini(rdev);
2655	kfree(rdev->bios);
2656	rdev->bios = NULL;
2657	radeon_dummy_page_fini(rdev);
2658}
2659
2660
2661/*
2662 * CS stuff
2663 */
2664void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
2665{
2666	radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
2667	radeon_ring_write(rdev, ib->gpu_addr & 0xFFFFFFFC);
2668	radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
2669	radeon_ring_write(rdev, ib->length_dw);
2670}
2671
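/* Like the ring test, but through an indirect buffer: submit a small IB
 * that writes 0xDEADBEEF to a scratch register, wait on its fence and
 * poll for the value.
 */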
2672int r600_ib_test(struct radeon_device *rdev)
2673{
2674	struct radeon_ib *ib;
2675	uint32_t scratch;
2676	uint32_t tmp = 0;
2677	unsigned i;
2678	int r;
2679
2680	r = radeon_scratch_get(rdev, &scratch);
2681	if (r) {
2682		DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
2683		return r;
2684	}
2685	WREG32(scratch, 0xCAFEDEAD);
2686	r = radeon_ib_get(rdev, &ib);
2687	if (r) {
2688		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
2689		return r;
2690	}
2691	ib->ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
2692	ib->ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2693	ib->ptr[2] = 0xDEADBEEF;
2694	ib->ptr[3] = PACKET2(0);
2695	ib->ptr[4] = PACKET2(0);
2696	ib->ptr[5] = PACKET2(0);
2697	ib->ptr[6] = PACKET2(0);
2698	ib->ptr[7] = PACKET2(0);
2699	ib->ptr[8] = PACKET2(0);
2700	ib->ptr[9] = PACKET2(0);
2701	ib->ptr[10] = PACKET2(0);
2702	ib->ptr[11] = PACKET2(0);
2703	ib->ptr[12] = PACKET2(0);
2704	ib->ptr[13] = PACKET2(0);
2705	ib->ptr[14] = PACKET2(0);
2706	ib->ptr[15] = PACKET2(0);
2707	ib->length_dw = 16;
2708	r = radeon_ib_schedule(rdev, ib);
2709	if (r) {
2710		radeon_scratch_free(rdev, scratch);
2711		radeon_ib_free(rdev, &ib);
2712		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
2713		return r;
2714	}
2715	r = radeon_fence_wait(ib->fence, false);
2716	if (r) {
2717		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
2718		return r;
2719	}
2720	for (i = 0; i < rdev->usec_timeout; i++) {
2721		tmp = RREG32(scratch);
2722		if (tmp == 0xDEADBEEF)
2723			break;
2724		DRM_UDELAY(1);
2725	}
2726	if (i < rdev->usec_timeout) {
2727		DRM_INFO("ib test succeeded in %u usecs\n", i);
2728	} else {
2729		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
2730			  scratch, tmp);
2731		r = -EINVAL;
2732	}
2733	radeon_scratch_free(rdev, scratch);
2734	radeon_ib_free(rdev, &ib);
2735	return r;
2736}
2737
2738/*
2739 * Interrupts
2740 *
2741 * Interrupts use a ring buffer on r6xx/r7xx hardware.  It works pretty
2742 * much the same as the CP ring buffer, but in reverse.  Rather than the CPU
2743 * writing to the ring and the GPU consuming, the GPU writes to the ring
2744 * and the host consumes.  As the host irq handler processes interrupts, it
2745 * increments the rptr.  When the rptr catches up with the wptr, all the
2746 * current interrupts have been processed.
2747 */
2748
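/* Size the IH ring: round up to a power of two and derive the byte mask
 * used to wrap the read/write pointers.
 */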
2749void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
2750{
2751	u32 rb_bufsz;
2752
2753	/* Align ring size */
2754	rb_bufsz = drm_order(ring_size / 4);
2755	ring_size = (1 << rb_bufsz) * 4;
2756	rdev->ih.ring_size = ring_size;
2757	rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
2758	rdev->ih.rptr = 0;
2759}
2760
2761static int r600_ih_ring_alloc(struct radeon_device *rdev)
2762{
2763	int r;
2764
2765	/* Allocate ring buffer */
2766	if (rdev->ih.ring_obj == NULL) {
2767		r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size,
2768				     true,
2769				     RADEON_GEM_DOMAIN_GTT,
2770				     &rdev->ih.ring_obj);
2771		if (r) {
2772			DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
2773			return r;
2774		}
2775		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
2776		if (unlikely(r != 0))
2777			return r;
2778		r = radeon_bo_pin(rdev->ih.ring_obj,
2779				  RADEON_GEM_DOMAIN_GTT,
2780				  &rdev->ih.gpu_addr);
2781		if (r) {
2782			radeon_bo_unreserve(rdev->ih.ring_obj);
2783			DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
2784			return r;
2785		}
2786		r = radeon_bo_kmap(rdev->ih.ring_obj,
2787				   (void **)&rdev->ih.ring);
2788		radeon_bo_unreserve(rdev->ih.ring_obj);
2789		if (r) {
2790			DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
2791			return r;
2792		}
2793	}
2794	return 0;
2795}
2796
2797static void r600_ih_ring_fini(struct radeon_device *rdev)
2798{
2799	int r;
2800	if (rdev->ih.ring_obj) {
2801		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
2802		if (likely(r == 0)) {
2803			radeon_bo_kunmap(rdev->ih.ring_obj);
2804			radeon_bo_unpin(rdev->ih.ring_obj);
2805			radeon_bo_unreserve(rdev->ih.ring_obj);
2806		}
2807		radeon_bo_unref(&rdev->ih.ring_obj);
2808		rdev->ih.ring = NULL;
2809		rdev->ih.ring_obj = NULL;
2810	}
2811}
2812
2813void r600_rlc_stop(struct radeon_device *rdev)
2814{
2815
2816	if ((rdev->family >= CHIP_RV770) &&
2817	    (rdev->family <= CHIP_RV740)) {
2818		/* r7xx asics need to soft reset RLC before halting */
2819		WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
2820		RREG32(SRBM_SOFT_RESET);
2821		udelay(15000);
2822		WREG32(SRBM_SOFT_RESET, 0);
2823		RREG32(SRBM_SOFT_RESET);
2824	}
2825
2826	WREG32(RLC_CNTL, 0);
2827}
2828
2829static void r600_rlc_start(struct radeon_device *rdev)
2830{
2831	WREG32(RLC_CNTL, RLC_ENABLE);
2832}
2833
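/* (Re)initialize the RLC: stop it, clear its state registers, upload the
 * family-appropriate ucode image and start it again.
 */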
2834static int r600_rlc_init(struct radeon_device *rdev)
2835{
2836	u32 i;
2837	const __be32 *fw_data;
2838
2839	if (!rdev->rlc_fw)
2840		return -EINVAL;
2841
2842	r600_rlc_stop(rdev);
2843
2844	WREG32(RLC_HB_BASE, 0);
2845	WREG32(RLC_HB_CNTL, 0);
2846	WREG32(RLC_HB_RPTR, 0);
2847	WREG32(RLC_HB_WPTR, 0);
2848	WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
2849	WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
2850	WREG32(RLC_MC_CNTL, 0);
2851	WREG32(RLC_UCODE_CNTL, 0);
2852
2853	fw_data = (const __be32 *)rdev->rlc_fw->data;
2854	if (rdev->family >= CHIP_CEDAR) {
2855		for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
2856			WREG32(RLC_UCODE_ADDR, i);
2857			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2858		}
2859	} else if (rdev->family >= CHIP_RV770) {
2860		for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
2861			WREG32(RLC_UCODE_ADDR, i);
2862			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2863		}
2864	} else {
2865		for (i = 0; i < RLC_UCODE_SIZE; i++) {
2866			WREG32(RLC_UCODE_ADDR, i);
2867			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2868		}
2869	}
2870	WREG32(RLC_UCODE_ADDR, 0);
2871
2872	r600_rlc_start(rdev);
2873
2874	return 0;
2875}
2876
2877static void r600_enable_interrupts(struct radeon_device *rdev)
2878{
2879	u32 ih_cntl = RREG32(IH_CNTL);
2880	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
2881
2882	ih_cntl |= ENABLE_INTR;
2883	ih_rb_cntl |= IH_RB_ENABLE;
2884	WREG32(IH_CNTL, ih_cntl);
2885	WREG32(IH_RB_CNTL, ih_rb_cntl);
2886	rdev->ih.enabled = true;
2887}
2888
2889void r600_disable_interrupts(struct radeon_device *rdev)
2890{
2891	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
2892	u32 ih_cntl = RREG32(IH_CNTL);
2893
2894	ih_rb_cntl &= ~IH_RB_ENABLE;
2895	ih_cntl &= ~ENABLE_INTR;
2896	WREG32(IH_RB_CNTL, ih_rb_cntl);
2897	WREG32(IH_CNTL, ih_cntl);
2898	/* set rptr, wptr to 0 */
2899	WREG32(IH_RB_RPTR, 0);
2900	WREG32(IH_RB_WPTR, 0);
2901	rdev->ih.enabled = false;
2902	rdev->ih.wptr = 0;
2903	rdev->ih.rptr = 0;
2904}
2905
2906static void r600_disable_interrupt_state(struct radeon_device *rdev)
2907{
2908	u32 tmp;
2909
2910	WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
2911	WREG32(GRBM_INT_CNTL, 0);
2912	WREG32(DxMODE_INT_MASK, 0);
2913	if (ASIC_IS_DCE3(rdev)) {
2914		WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
2915		WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
2916		tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2917		WREG32(DC_HPD1_INT_CONTROL, tmp);
2918		tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2919		WREG32(DC_HPD2_INT_CONTROL, tmp);
2920		tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2921		WREG32(DC_HPD3_INT_CONTROL, tmp);
2922		tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2923		WREG32(DC_HPD4_INT_CONTROL, tmp);
2924		if (ASIC_IS_DCE32(rdev)) {
2925			tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2926			WREG32(DC_HPD5_INT_CONTROL, tmp);
2927			tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2928			WREG32(DC_HPD6_INT_CONTROL, tmp);
2929		}
2930	} else {
2931		WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
2932		WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
2933		tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
2934		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
2935		tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
2936		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
2937		tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
2938		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
2939	}
2940}
2941
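/* One-time interrupt setup: allocate the IH ring, init the RLC, program
 * the IH ring registers and IH_CNTL, force all sources off and finally
 * enable the controller.
 */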
2942int r600_irq_init(struct radeon_device *rdev)
2943{
2944	int ret = 0;
2945	int rb_bufsz;
2946	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
2947
2948	/* allocate ring */
2949	ret = r600_ih_ring_alloc(rdev);
2950	if (ret)
2951		return ret;
2952
2953	/* disable irqs */
2954	r600_disable_interrupts(rdev);
2955
2956	/* init rlc */
2957	ret = r600_rlc_init(rdev);
2958	if (ret) {
2959		r600_ih_ring_fini(rdev);
2960		return ret;
2961	}
2962
2963	/* setup interrupt control */
2964	/* set dummy read address to ring address */
2965	WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
2966	interrupt_cntl = RREG32(INTERRUPT_CNTL);
2967	/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
2968	 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
2969	 */
2970	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
2971	/* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
2972	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
2973	WREG32(INTERRUPT_CNTL, interrupt_cntl);
2974
2975	WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
2976	rb_bufsz = drm_order(rdev->ih.ring_size / 4);
2977
2978	ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
2979		      IH_WPTR_OVERFLOW_CLEAR |
2980		      (rb_bufsz << 1));
2981	/* WPTR writeback, not yet */
2982	/*ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;*/
2983	WREG32(IH_RB_WPTR_ADDR_LO, 0);
2984	WREG32(IH_RB_WPTR_ADDR_HI, 0);
2985
2986	WREG32(IH_RB_CNTL, ih_rb_cntl);
2987
2988	/* set rptr, wptr to 0 */
2989	WREG32(IH_RB_RPTR, 0);
2990	WREG32(IH_RB_WPTR, 0);
2991
2992	/* Default settings for IH_CNTL (disabled at first) */
2993	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
2994	/* RPTR_REARM only works if msi's are enabled */
2995	if (rdev->msi_enabled)
2996		ih_cntl |= RPTR_REARM;
2997
2998#ifdef __BIG_ENDIAN
2999	ih_cntl |= IH_MC_SWAP(IH_MC_SWAP_32BIT);
3000#endif
3001	WREG32(IH_CNTL, ih_cntl);
3002
3003	/* force the active interrupt state to all disabled */
3004	if (rdev->family >= CHIP_CEDAR)
3005		evergreen_disable_interrupt_state(rdev);
3006	else
3007		r600_disable_interrupt_state(rdev);
3008
3009	/* enable irqs */
3010	r600_enable_interrupts(rdev);
3011
3012	return ret;
3013}
3014
3015void r600_irq_suspend(struct radeon_device *rdev)
3016{
3017	r600_irq_disable(rdev);
3018	r600_rlc_stop(rdev);
3019}
3020
3021void r600_irq_fini(struct radeon_device *rdev)
3022{
3023	r600_irq_suspend(rdev);
3024	r600_ih_ring_fini(rdev);
3025}
3026
3027int r600_irq_set(struct radeon_device *rdev)
3028{
3029	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
3030	u32 mode_int = 0;
3031	u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
3032	u32 grbm_int_cntl = 0;
3033	u32 hdmi1, hdmi2;
3034
3035	if (!rdev->irq.installed) {
3036		WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
3037		return -EINVAL;
3038	}
3039	/* don't enable anything if the ih is disabled */
3040	if (!rdev->ih.enabled) {
3041		r600_disable_interrupts(rdev);
3042		/* force the active interrupt state to all disabled */
3043		r600_disable_interrupt_state(rdev);
3044		return 0;
3045	}
3046
3047	hdmi1 = RREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
3048	if (ASIC_IS_DCE3(rdev)) {
3049		hdmi2 = RREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
3050		hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
3051		hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
3052		hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
3053		hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
3054		if (ASIC_IS_DCE32(rdev)) {
3055			hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
3056			hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
3057		}
3058	} else {
3059		hdmi2 = RREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
3060		hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
3061		hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
3062		hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
3063	}
3064
3065	if (rdev->irq.sw_int) {
3066		DRM_DEBUG("r600_irq_set: sw int\n");
3067		cp_int_cntl |= RB_INT_ENABLE;
3068	}
3069	if (rdev->irq.crtc_vblank_int[0]) {
3070		DRM_DEBUG("r600_irq_set: vblank 0\n");
3071		mode_int |= D1MODE_VBLANK_INT_MASK;
3072	}
3073	if (rdev->irq.crtc_vblank_int[1]) {
3074		DRM_DEBUG("r600_irq_set: vblank 1\n");
3075		mode_int |= D2MODE_VBLANK_INT_MASK;
3076	}
3077	if (rdev->irq.hpd[0]) {
3078		DRM_DEBUG("r600_irq_set: hpd 1\n");
3079		hpd1 |= DC_HPDx_INT_EN;
3080	}
3081	if (rdev->irq.hpd[1]) {
3082		DRM_DEBUG("r600_irq_set: hpd 2\n");
3083		hpd2 |= DC_HPDx_INT_EN;
3084	}
3085	if (rdev->irq.hpd[2]) {
3086		DRM_DEBUG("r600_irq_set: hpd 3\n");
3087		hpd3 |= DC_HPDx_INT_EN;
3088	}
3089	if (rdev->irq.hpd[3]) {
3090		DRM_DEBUG("r600_irq_set: hpd 4\n");
3091		hpd4 |= DC_HPDx_INT_EN;
3092	}
3093	if (rdev->irq.hpd[4]) {
3094		DRM_DEBUG("r600_irq_set: hpd 5\n");
3095		hpd5 |= DC_HPDx_INT_EN;
3096	}
3097	if (rdev->irq.hpd[5]) {
3098		DRM_DEBUG("r600_irq_set: hpd 6\n");
3099		hpd6 |= DC_HPDx_INT_EN;
3100	}
3101	if (rdev->irq.hdmi[0]) {
3102		DRM_DEBUG("r600_irq_set: hdmi 1\n");
3103		hdmi1 |= R600_HDMI_INT_EN;
3104	}
3105	if (rdev->irq.hdmi[1]) {
3106		DRM_DEBUG("r600_irq_set: hdmi 2\n");
3107		hdmi2 |= R600_HDMI_INT_EN;
3108	}
3109	if (rdev->irq.gui_idle) {
3110		DRM_DEBUG("gui idle\n");
3111		grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
3112	}
3113
3114	WREG32(CP_INT_CNTL, cp_int_cntl);
3115	WREG32(DxMODE_INT_MASK, mode_int);
3116	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
3117	WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1);
3118	if (ASIC_IS_DCE3(rdev)) {
3119		WREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, hdmi2);
3120		WREG32(DC_HPD1_INT_CONTROL, hpd1);
3121		WREG32(DC_HPD2_INT_CONTROL, hpd2);
3122		WREG32(DC_HPD3_INT_CONTROL, hpd3);
3123		WREG32(DC_HPD4_INT_CONTROL, hpd4);
3124		if (ASIC_IS_DCE32(rdev)) {
3125			WREG32(DC_HPD5_INT_CONTROL, hpd5);
3126			WREG32(DC_HPD6_INT_CONTROL, hpd6);
3127		}
3128	} else {
3129		WREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, hdmi2);
3130		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
3131		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
3132		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
3133	}
3134
3135	return 0;
3136}
3137
3138static inline void r600_irq_ack(struct radeon_device *rdev,
3139				u32 *disp_int,
3140				u32 *disp_int_cont,
3141				u32 *disp_int_cont2)
3142{
3143	u32 tmp;
3144
3145	if (ASIC_IS_DCE3(rdev)) {
3146		*disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
3147		*disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
3148		*disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
3149	} else {
3150		*disp_int = RREG32(DISP_INTERRUPT_STATUS);
3151		*disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
3152		*disp_int_cont2 = 0;
3153	}
3154
3155	if (*disp_int & LB_D1_VBLANK_INTERRUPT)
3156		WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
3157	if (*disp_int & LB_D1_VLINE_INTERRUPT)
3158		WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
3159	if (*disp_int & LB_D2_VBLANK_INTERRUPT)
3160		WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
3161	if (*disp_int & LB_D2_VLINE_INTERRUPT)
3162		WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
3163	if (*disp_int & DC_HPD1_INTERRUPT) {
3164		if (ASIC_IS_DCE3(rdev)) {
3165			tmp = RREG32(DC_HPD1_INT_CONTROL);
3166			tmp |= DC_HPDx_INT_ACK;
3167			WREG32(DC_HPD1_INT_CONTROL, tmp);
3168		} else {
3169			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
3170			tmp |= DC_HPDx_INT_ACK;
3171			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
3172		}
3173	}
3174	if (*disp_int & DC_HPD2_INTERRUPT) {
3175		if (ASIC_IS_DCE3(rdev)) {
3176			tmp = RREG32(DC_HPD2_INT_CONTROL);
3177			tmp |= DC_HPDx_INT_ACK;
3178			WREG32(DC_HPD2_INT_CONTROL, tmp);
3179		} else {
3180			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
3181			tmp |= DC_HPDx_INT_ACK;
3182			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
3183		}
3184	}
3185	if (*disp_int_cont & DC_HPD3_INTERRUPT) {
3186		if (ASIC_IS_DCE3(rdev)) {
3187			tmp = RREG32(DC_HPD3_INT_CONTROL);
3188			tmp |= DC_HPDx_INT_ACK;
3189			WREG32(DC_HPD3_INT_CONTROL, tmp);
3190		} else {
3191			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
3192			tmp |= DC_HPDx_INT_ACK;
3193			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
3194		}
3195	}
3196	if (*disp_int_cont & DC_HPD4_INTERRUPT) {
3197		tmp = RREG32(DC_HPD4_INT_CONTROL);
3198		tmp |= DC_HPDx_INT_ACK;
3199		WREG32(DC_HPD4_INT_CONTROL, tmp);
3200	}
3201	if (ASIC_IS_DCE32(rdev)) {
3202		if (*disp_int_cont2 & DC_HPD5_INTERRUPT) {
3203			tmp = RREG32(DC_HPD5_INT_CONTROL);
3204			tmp |= DC_HPDx_INT_ACK;
3205			WREG32(DC_HPD5_INT_CONTROL, tmp);
3206		}
3207		if (*disp_int_cont2 & DC_HPD6_INTERRUPT) {
3208			tmp = RREG32(DC_HPD6_INT_CONTROL);
3209			tmp |= DC_HPDx_INT_ACK;
3210			WREG32(DC_HPD6_INT_CONTROL, tmp);
3211		}
3212	}
3213	if (RREG32(R600_HDMI_BLOCK1 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
3214		WREG32_P(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
3215	}
3216	if (ASIC_IS_DCE3(rdev)) {
3217		if (RREG32(R600_HDMI_BLOCK3 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
3218			WREG32_P(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
3219		}
3220	} else {
3221		if (RREG32(R600_HDMI_BLOCK2 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
3222			WREG32_P(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
3223		}
3224	}
3225}
3226
3227void r600_irq_disable(struct radeon_device *rdev)
3228{
3229	u32 disp_int, disp_int_cont, disp_int_cont2;
3230
3231	r600_disable_interrupts(rdev);
3232	/* Wait and acknowledge irq */
3233	mdelay(1);
3234	r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
3235	r600_disable_interrupt_state(rdev);
3236}
3237
3238static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
3239{
3240	u32 wptr, tmp;
3241
3242	wptr = RREG32(IH_RB_WPTR);
3243
3244	if (wptr & RB_OVERFLOW) {
3245		/* When a ring buffer overflow happens, start parsing interrupts
3246		 * from the last not-yet-overwritten vector (wptr + 16).  Hopefully
3247		 * this allows us to catch up.
3248		 */
3249		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
3250			wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
3251		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
3252		tmp = RREG32(IH_RB_CNTL);
3253		tmp |= IH_WPTR_OVERFLOW_CLEAR;
3254		WREG32(IH_RB_CNTL, tmp);
3255	}
3256	return (wptr & rdev->ih.ptr_mask);
3257}
3258
3259/*        r600 IV Ring
3260 * Each IV ring entry is 128 bits:
3261 * [7:0]    - interrupt source id
3262 * [31:8]   - reserved
3263 * [59:32]  - interrupt source data
3264 * [127:60]  - reserved
3265 *
3266 * The basic interrupt vector entries
3267 * are decoded as follows:
3268 * src_id  src_data  description
3269 *      1         0  D1 Vblank
3270 *      1         1  D1 Vline
3271 *      5         0  D2 Vblank
3272 *      5         1  D2 Vline
3273 *     19         0  FP Hot plug detection A
3274 *     19         1  FP Hot plug detection B
3275 *     19         2  DAC A auto-detection
3276 *     19         3  DAC B auto-detection
3277 *     21         4  HDMI block A
3278 *     21         5  HDMI block B
3279 *    176         -  CP_INT RB
3280 *    177         -  CP_INT IB1
3281 *    178         -  CP_INT IB2
3282 *    181         -  EOP Interrupt
3283 *    233         -  GUI Idle
3284 *
3285 * Note, these are based on r600 and may need to be
3286 * adjusted or added to on newer asics
3287 */
3288
3289int r600_irq_process(struct radeon_device *rdev)
3290{
3291	u32 wptr = r600_get_ih_wptr(rdev);
3292	u32 rptr = rdev->ih.rptr;
3293	u32 src_id, src_data;
3294	u32 ring_index, disp_int, disp_int_cont, disp_int_cont2;
3295	unsigned long flags;
3296	bool queue_hotplug = false;
3297
3298	DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
3299	if (!rdev->ih.enabled)
3300		return IRQ_NONE;
3301
3302	spin_lock_irqsave(&rdev->ih.lock, flags);
3303
3304	if (rptr == wptr) {
3305		spin_unlock_irqrestore(&rdev->ih.lock, flags);
3306		return IRQ_NONE;
3307	}
3308	if (rdev->shutdown) {
3309		spin_unlock_irqrestore(&rdev->ih.lock, flags);
3310		return IRQ_NONE;
3311	}
3312
3313restart_ih:
3314	/* display interrupts */
3315	r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
3316
3317	rdev->ih.wptr = wptr;
3318	while (rptr != wptr) {
3319		/* wptr/rptr are in bytes! */
3320		ring_index = rptr / 4;
3321		src_id =  rdev->ih.ring[ring_index] & 0xff;
3322		src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff;
3323
3324		switch (src_id) {
3325		case 1: /* D1 vblank/vline */
3326			switch (src_data) {
3327			case 0: /* D1 vblank */
3328				if (disp_int & LB_D1_VBLANK_INTERRUPT) {
3329					drm_handle_vblank(rdev->ddev, 0);
3330					rdev->pm.vblank_sync = true;
3331					wake_up(&rdev->irq.vblank_queue);
3332					disp_int &= ~LB_D1_VBLANK_INTERRUPT;
3333					DRM_DEBUG("IH: D1 vblank\n");
3334				}
3335				break;
3336			case 1: /* D1 vline */
3337				if (disp_int & LB_D1_VLINE_INTERRUPT) {
3338					disp_int &= ~LB_D1_VLINE_INTERRUPT;
3339					DRM_DEBUG("IH: D1 vline\n");
3340				}
3341				break;
3342			default:
3343				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3344				break;
3345			}
3346			break;
3347		case 5: /* D2 vblank/vline */
3348			switch (src_data) {
3349			case 0: /* D2 vblank */
3350				if (disp_int & LB_D2_VBLANK_INTERRUPT) {
3351					drm_handle_vblank(rdev->ddev, 1);
3352					rdev->pm.vblank_sync = true;
3353					wake_up(&rdev->irq.vblank_queue);
3354					disp_int &= ~LB_D2_VBLANK_INTERRUPT;
3355					DRM_DEBUG("IH: D2 vblank\n");
3356				}
3357				break;
3358			case 1: /* D2 vline */
3359				if (disp_int & LB_D2_VLINE_INTERRUPT) {
3360					disp_int &= ~LB_D2_VLINE_INTERRUPT;
3361					DRM_DEBUG("IH: D2 vline\n");
3362				}
3363				break;
3364			default:
3365				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3366				break;
3367			}
3368			break;
3369		case 19: /* HPD/DAC hotplug */
3370			switch (src_data) {
3371			case 0:
3372				if (disp_int & DC_HPD1_INTERRUPT) {
3373					disp_int &= ~DC_HPD1_INTERRUPT;
3374					queue_hotplug = true;
3375					DRM_DEBUG("IH: HPD1\n");
3376				}
3377				break;
3378			case 1:
3379				if (disp_int & DC_HPD2_INTERRUPT) {
3380					disp_int &= ~DC_HPD2_INTERRUPT;
3381					queue_hotplug = true;
3382					DRM_DEBUG("IH: HPD2\n");
3383				}
3384				break;
3385			case 4:
3386				if (disp_int_cont & DC_HPD3_INTERRUPT) {
3387					disp_int_cont &= ~DC_HPD3_INTERRUPT;
3388					queue_hotplug = true;
3389					DRM_DEBUG("IH: HPD3\n");
3390				}
3391				break;
3392			case 5:
3393				if (disp_int_cont & DC_HPD4_INTERRUPT) {
3394					disp_int_cont &= ~DC_HPD4_INTERRUPT;
3395					queue_hotplug = true;
3396					DRM_DEBUG("IH: HPD4\n");
3397				}
3398				break;
3399			case 10:
3400				if (disp_int_cont2 & DC_HPD5_INTERRUPT) {
3401					disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
3402					queue_hotplug = true;
3403					DRM_DEBUG("IH: HPD5\n");
3404				}
3405				break;
3406			case 12:
3407				if (disp_int_cont2 & DC_HPD6_INTERRUPT) {
3408					disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
3409					queue_hotplug = true;
3410					DRM_DEBUG("IH: HPD6\n");
3411				}
3412				break;
3413			default:
3414				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3415				break;
3416			}
3417			break;
3418		case 21: /* HDMI */
3419			DRM_DEBUG("IH: HDMI: 0x%x\n", src_data);
3420			r600_audio_schedule_polling(rdev);
3421			break;
3422		case 176: /* CP_INT in ring buffer */
3423		case 177: /* CP_INT in IB1 */
3424		case 178: /* CP_INT in IB2 */
3425			DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
3426			radeon_fence_process(rdev);
3427			break;
3428		case 181: /* CP EOP event */
3429			DRM_DEBUG("IH: CP EOP\n");
3430			break;
3431		case 233: /* GUI IDLE */
3432			DRM_DEBUG("IH: GUI idle\n");
3433			rdev->pm.gui_idle = true;
3434			wake_up(&rdev->irq.idle_queue);
3435			break;
3436		default:
3437			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3438			break;
3439		}
3440
3441		/* wptr/rptr are in bytes! */
3442		rptr += 16;
3443		rptr &= rdev->ih.ptr_mask;
3444	}
3445	/* make sure wptr hasn't changed while processing */
3446	wptr = r600_get_ih_wptr(rdev);
3447	if (wptr != rdev->ih.wptr)
3448		goto restart_ih;
3449	if (queue_hotplug)
3450		queue_work(rdev->wq, &rdev->hotplug_work);
3451	rdev->ih.rptr = rptr;
3452	WREG32(IH_RB_RPTR, rdev->ih.rptr);
3453	spin_unlock_irqrestore(&rdev->ih.lock, flags);
3454	return IRQ_HANDLED;
3455}
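
/* Sketch of the expected top-level caller (hedged: the exact code lives
 * in radeon_irq_kms.c): the DRM interrupt handler simply forwards to the
 * ASIC's irq_process hook and propagates its irqreturn_t verdict.
 *
 *	irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS)
 *	{
 *		struct drm_device *dev = (struct drm_device *) arg;
 *		struct radeon_device *rdev = dev->dev_private;
 *
 *		return radeon_irq_process(rdev);
 *	}
 */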
3456
3457/*
3458 * Debugfs info
3459 */
3460#if defined(CONFIG_DEBUG_FS)
3461
3462static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
3463{
3464	struct drm_info_node *node = (struct drm_info_node *) m->private;
3465	struct drm_device *dev = node->minor->dev;
3466	struct radeon_device *rdev = dev->dev_private;
3467	unsigned count, i, j;
3468
3469	radeon_ring_free_size(rdev);
3470	count = (rdev->cp.ring_size / 4) - rdev->cp.ring_free_dw;
3471	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT));
3472	seq_printf(m, "CP_RB_WPTR 0x%08x\n", RREG32(CP_RB_WPTR));
3473	seq_printf(m, "CP_RB_RPTR 0x%08x\n", RREG32(CP_RB_RPTR));
3474	seq_printf(m, "driver's copy of the CP_RB_WPTR 0x%08x\n", rdev->cp.wptr);
3475	seq_printf(m, "driver's copy of the CP_RB_RPTR 0x%08x\n", rdev->cp.rptr);
3476	seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
3477	seq_printf(m, "%u dwords in ring\n", count);
3478	i = rdev->cp.rptr;
3479	for (j = 0; j <= count; j++) {
3480		seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
3481		i = (i + 1) & rdev->cp.ptr_mask;
3482	}
3483	return 0;
3484}
3485
3486static int r600_debugfs_mc_info(struct seq_file *m, void *data)
3487{
3488	struct drm_info_node *node = (struct drm_info_node *) m->private;
3489	struct drm_device *dev = node->minor->dev;
3490	struct radeon_device *rdev = dev->dev_private;
3491
3492	DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
3493	DREG32_SYS(m, rdev, VM_L2_STATUS);
3494	return 0;
3495}
3496
3497static struct drm_info_list r600_mc_info_list[] = {
3498	{"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
3499	{"r600_ring_info", r600_debugfs_cp_ring_info, 0, NULL},
3500};
3501#endif
3502
3503int r600_debugfs_mc_info_init(struct radeon_device *rdev)
3504{
3505#if defined(CONFIG_DEBUG_FS)
3506	return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
3507#else
3508	return 0;
3509#endif
3510}
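
/* Usage note (assumes debugfs is mounted at /sys/kernel/debug): once
 * registered, the two entries above can be read from userspace, e.g.
 *
 *	cat /sys/kernel/debug/dri/0/r600_mc_info
 *	cat /sys/kernel/debug/dri/0/r600_ring_info
 *
 * where "0" is the DRM minor of the radeon device.
 */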
3511
3512/**
3513 * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
3514 * @rdev: radeon device structure
3515 * @bo: buffer object struct which userspace is waiting for idle
3516 *
3517 * Some R6XX/R7XX chips don't seem to honor an HDP flush performed
3518 * through the ring buffer, which leads to rendering corruption; see
3519 * http://bugzilla.kernel.org/show_bug.cgi?id=15186.  To avoid this we
3520 * perform the HDP flush directly by writing the register through MMIO.
3521 */
3522void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
3523{
3524	/* r7xx hw bug: use a write to HDP_DEBUG1 followed by an fb read
3525	 * rather than a write to HDP_REG_COHERENCY_FLUSH_CNTL.
3526	 * That workaround seems to cause problems on some AGP cards, so
3527	 * just use the old method for them.
3528	 */
3529	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
3530	    rdev->vram_scratch.ptr && !(rdev->flags & RADEON_IS_AGP)) {
3531		void __iomem *ptr = (void __iomem *)rdev->vram_scratch.ptr;
3532		u32 tmp;
3533
3534		WREG32(HDP_DEBUG1, 0);
3535		tmp = readl(ptr);	/* fb read back triggers the HDP flush */
3536	} else
3537		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
3538}
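
/* Sketch of the expected call site (hedged; the exact code lives in
 * radeon_gem.c): the GEM wait-idle ioctl invokes this workaround via the
 * ASIC function table once the buffer object has gone idle:
 *
 *	if (rdev->asic->ioctl_wait_idle)
 *		robj->rdev->asic->ioctl_wait_idle(rdev, robj);
 */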
3539