/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/dev/drm2/radeon/evergreen.c 360303 2020-04-25 13:10:17Z dim $");

#include <dev/drm2/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include <dev/drm2/radeon/radeon_drm.h>
#include "evergreend.h"
#include "atom.h"
#include "avivod.h"
#include "evergreen_reg.h"
#include "evergreen_blit_shaders.h"

#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
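/* Note: the two microcode sizes above are in 32-bit dwords; the load loops
 * in evergreen_cp_load_microcode() below write exactly that many words.
 */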

static const u32 crtc_offsets[6] =
{
	EVERGREEN_CRTC0_REGISTER_OFFSET,
	EVERGREEN_CRTC1_REGISTER_OFFSET,
	EVERGREEN_CRTC2_REGISTER_OFFSET,
	EVERGREEN_CRTC3_REGISTER_OFFSET,
	EVERGREEN_CRTC4_REGISTER_OFFSET,
	EVERGREEN_CRTC5_REGISTER_OFFSET
};

static void evergreen_gpu_init(struct radeon_device *rdev);
#ifdef FREEBSD_WIP /* FreeBSD: to please GCC 4.2. */
void evergreen_fini(struct radeon_device *rdev);
#endif
void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
#ifdef FREEBSD_WIP /* FreeBSD: to please GCC 4.2. */
extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
				     int ring, u32 cp_int_cntl);
#endif

void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
			     unsigned *bankh, unsigned *mtaspect,
			     unsigned *tile_split)
{
	*bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
	*bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
	*mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
	*tile_split = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
	switch (*bankw) {
	default:
	case 1: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_1; break;
	case 2: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_2; break;
	case 4: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_4; break;
	case 8: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_8; break;
	}
	switch (*bankh) {
	default:
	case 1: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_1; break;
	case 2: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_2; break;
	case 4: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_4; break;
	case 8: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_8; break;
	}
	switch (*mtaspect) {
	default:
	case 1: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1; break;
	case 2: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2; break;
	case 4: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4; break;
	case 8: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8; break;
	}
}

void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
{
	u16 ctl, v;
	int err, cap;

	err = pci_find_cap(rdev->dev, PCIY_EXPRESS, &cap);
	if (err)
		return;

	cap += PCIER_DEVICE_CTL;

	ctl = pci_read_config(rdev->dev, cap, 2);

	v = (ctl & PCIEM_CTL_MAX_READ_REQUEST) >> 12;

	/* if bios or OS sets MAX_READ_REQUEST_SIZE to an invalid value, fix it
	 * to avoid hangs or performance issues
	 */
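	/*
	 * Background note (from the PCIe spec, not the original comment):
	 * the MRRS field encodes 128 << v bytes for v = 0..5, so the value
	 * 2 written below selects a 512-byte maximum read request; 6 and 7
	 * are reserved encodings, and 0 (128 bytes) is treated here as
	 * problematic for this hardware.
	 */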
	if ((v == 0) || (v == 6) || (v == 7)) {
		ctl &= ~PCIEM_CTL_MAX_READ_REQUEST;
		ctl |= (2 << 12);
		pci_write_config(rdev->dev, cap, ctl, 2);
	}
}

static bool dce4_is_in_vblank(struct radeon_device *rdev, int crtc)
{
	if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
		return true;
	else
		return false;
}

static bool dce4_is_counter_moving(struct radeon_device *rdev, int crtc)
{
	u32 pos1, pos2;

	pos1 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
	pos2 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);

	if (pos1 != pos2)
		return true;
	else
		return false;
}

/**
 * dce4_wait_for_vblank - vblank wait asic callback.
 *
 * @rdev: radeon_device pointer
 * @crtc: crtc to wait for vblank on
 *
 * Wait for vblank on the requested crtc (evergreen+).
 */
void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc)
{
	unsigned i = 0;

	if (crtc >= rdev->num_crtc)
		return;

	if (!(RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN))
		return;

	/* depending on when we hit vblank, we may be close to active; if so,
	 * wait for another frame.
	 */
	while (dce4_is_in_vblank(rdev, crtc)) {
		if (i++ % 100 == 0) {
			if (!dce4_is_counter_moving(rdev, crtc))
				break;
		}
	}

	while (!dce4_is_in_vblank(rdev, crtc)) {
		if (i++ % 100 == 0) {
			if (!dce4_is_counter_moving(rdev, crtc))
				break;
		}
	}
}

/**
 * evergreen_pre_page_flip - pre-pageflip callback.
 *
 * @rdev: radeon_device pointer
 * @crtc: crtc to prepare for pageflip on
 *
 * Pre-pageflip callback (evergreen+).
 * Enables the pageflip irq (vblank irq).
 */
void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc)
{
	/* enable the pflip int */
	radeon_irq_kms_pflip_irq_get(rdev, crtc);
}

/**
 * evergreen_post_page_flip - post-pageflip callback.
 *
 * @rdev: radeon_device pointer
 * @crtc: crtc to cleanup pageflip on
 *
 * Post-pageflip callback (evergreen+).
 * Disables the pageflip irq (vblank irq).
 */
void evergreen_post_page_flip(struct radeon_device *rdev, int crtc)
{
	/* disable the pflip int */
	radeon_irq_kms_pflip_irq_put(rdev, crtc);
}

/**
 * evergreen_page_flip - pageflip callback.
 *
 * @rdev: radeon_device pointer
 * @crtc_id: crtc to pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 *
 * Does the actual pageflip (evergreen+).
 * During vblank we take the crtc lock and wait for the update_pending
 * bit to go high; when it does, we release the lock and allow the
 * double buffered update to take place.
 * Returns the current update pending status.
 */
u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
	u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset);
	int i;

	/* Lock the graphics update lock */
	tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
	WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);

	/* update the scanout addresses */
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
	       (u32)crtc_base);

	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
	       (u32)crtc_base);

	/* Wait for update_pending to go high. */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING)
			break;
		udelay(1);
	}
	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");

	/* Unlock the lock, so double-buffering can take place inside vblank */
	tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
	WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);

	/* Return current update_pending status: */
	return RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING;
}

/* get temperature in millidegrees */
int evergreen_get_temp(struct radeon_device *rdev)
{
	u32 temp, toffset;
	int actual_temp = 0;

	if (rdev->family == CHIP_JUNIPER) {
		toffset = (RREG32(CG_THERMAL_CTRL) & TOFFSET_MASK) >>
			TOFFSET_SHIFT;
		temp = (RREG32(CG_TS0_STATUS) & TS0_ADC_DOUT_MASK) >>
			TS0_ADC_DOUT_SHIFT;

		if (toffset & 0x100)
			actual_temp = temp / 2 - (0x200 - toffset);
		else
			actual_temp = temp / 2 + toffset;

		actual_temp = actual_temp * 1000;

	} else {
		temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
			ASIC_T_SHIFT;

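		/*
		 * Decode sketch (inferred from the tests below): ASIC_T is
		 * a signed reading in half-degrees C; bit 10 flags an
		 * underflow clamp to -256, bit 9 an overflow clamp to 255,
		 * and bit 8 marks a negative 9-bit value that gets
		 * sign-extended before the conversion to millidegrees.
		 */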
		if (temp & 0x400)
			actual_temp = -256;
		else if (temp & 0x200)
			actual_temp = 255;
		else if (temp & 0x100) {
			actual_temp = temp & 0x1ff;
			actual_temp |= ~0x1ff;
		} else
			actual_temp = temp & 0xff;

		actual_temp = (actual_temp * 1000) / 2;
	}

	return actual_temp;
}

int sumo_get_temp(struct radeon_device *rdev)
{
	u32 temp = RREG32(CG_THERMAL_STATUS) & 0xff;
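	/* the status register appears to report temperature with a fixed
	 * bias; subtracting 49 converts it to degrees C before the
	 * millidegree scaling below (inferred, not documented here)
	 */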
	int actual_temp = temp - 49;

	return actual_temp * 1000;
}

/**
 * sumo_pm_init_profile - Initialize power profiles callback.
 *
 * @rdev: radeon_device pointer
 *
 * Initialize the power states used in profile mode
 * (sumo, trinity, SI).
 * Used for profile mode only.
 */
void sumo_pm_init_profile(struct radeon_device *rdev)
{
	int idx;

	/* default */
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;

	/* low,mid sh/mh */
	if (rdev->flags & RADEON_IS_MOBILITY)
		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
	else
		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);

	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;

	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;

	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;

	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;

	/* high sh/mh */
	idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx =
		rdev->pm.power_state[idx].num_clock_modes - 1;

	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx =
		rdev->pm.power_state[idx].num_clock_modes - 1;
}

/**
 * btc_pm_init_profile - Initialize power profiles callback.
 *
 * @rdev: radeon_device pointer
 *
 * Initialize the power states used in profile mode
 * (BTC, cayman).
 * Used for profile mode only.
 */
void btc_pm_init_profile(struct radeon_device *rdev)
{
	int idx;

	/* default */
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
	/* starting with BTC, there is one state that is used for both
	 * MH and SH.  Difference is that we always use the high clock index for
	 * mclk.
	 */
	if (rdev->flags & RADEON_IS_MOBILITY)
		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
	else
		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
	/* low sh */
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
	/* mid sh */
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
	/* high sh */
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
	/* low mh */
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
	/* mid mh */
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
	/* high mh */
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
}

/**
 * evergreen_pm_misc - set additional pm hw parameters callback.
 *
 * @rdev: radeon_device pointer
 *
 * Set non-clock parameters associated with a power state
 * (voltage, etc.) (evergreen+).
 */
void evergreen_pm_misc(struct radeon_device *rdev)
{
	int req_ps_idx = rdev->pm.requested_power_state_index;
	int req_cm_idx = rdev->pm.requested_clock_mode_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;

	if (voltage->type == VOLTAGE_SW) {
		/* 0xff01 is a flag rather than an actual voltage */
		if (voltage->voltage == 0xff01)
			return;
		if (voltage->voltage && (voltage->voltage != rdev->pm.current_vddc)) {
			radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
			rdev->pm.current_vddc = voltage->voltage;
			DRM_DEBUG("Setting: vddc: %d\n", voltage->voltage);
		}

		/* starting with BTC, there is one state that is used for both
		 * MH and SH.  Difference is that we always use the high clock index for
		 * mclk and vddci.
		 */
		if ((rdev->pm.pm_method == PM_METHOD_PROFILE) &&
		    (rdev->family >= CHIP_BARTS) &&
		    rdev->pm.active_crtc_count &&
		    ((rdev->pm.profile_index == PM_PROFILE_MID_MH_IDX) ||
		     (rdev->pm.profile_index == PM_PROFILE_LOW_MH_IDX)))
			voltage = &rdev->pm.power_state[req_ps_idx].
				clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].voltage;

		/* 0xff01 is a flag rather than an actual voltage */
		if (voltage->vddci == 0xff01)
			return;
		if (voltage->vddci && (voltage->vddci != rdev->pm.current_vddci)) {
			radeon_atom_set_voltage(rdev, voltage->vddci, SET_VOLTAGE_TYPE_ASIC_VDDCI);
			rdev->pm.current_vddci = voltage->vddci;
			DRM_DEBUG("Setting: vddci: %d\n", voltage->vddci);
		}
	}
}

/**
 * evergreen_pm_prepare - pre-power state change callback.
 *
 * @rdev: radeon_device pointer
 *
 * Prepare for a power state change (evergreen+).
 */
void evergreen_pm_prepare(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* disable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
			tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
			WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
		}
	}
}

/**
 * evergreen_pm_finish - post-power state change callback.
 *
 * @rdev: radeon_device pointer
 *
 * Clean up after a power state change (evergreen+).
 */
void evergreen_pm_finish(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* enable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
			tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
			WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
		}
	}
}

/**
 * evergreen_hpd_sense - hpd sense callback.
 *
 * @rdev: radeon_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Checks if a digital monitor is connected (evergreen+).
 * Returns true if connected, false if not connected.
 */
bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	switch (hpd) {
	case RADEON_HPD_1:
		if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_2:
		if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_3:
		if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_4:
		if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_5:
		if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_6:
		if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	default:
		break;
	}

	return connected;
}

/**
 * evergreen_hpd_set_polarity - hpd set polarity callback.
 *
 * @rdev: radeon_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Set the polarity of the hpd pin (evergreen+).
 */
void evergreen_hpd_set_polarity(struct radeon_device *rdev,
				enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = evergreen_hpd_sense(rdev, hpd);

	switch (hpd) {
	case RADEON_HPD_1:
		tmp = RREG32(DC_HPD1_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD1_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_2:
		tmp = RREG32(DC_HPD2_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD2_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_3:
		tmp = RREG32(DC_HPD3_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD3_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_4:
		tmp = RREG32(DC_HPD4_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_5:
		tmp = RREG32(DC_HPD5_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD5_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_6:
		tmp = RREG32(DC_HPD6_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD6_INT_CONTROL, tmp);
		break;
	default:
		break;
	}
}

/**
 * evergreen_hpd_init - hpd setup callback.
 *
 * @rdev: radeon_device pointer
 *
 * Setup the hpd pins used by the card (evergreen+).
 * Enable the pin, set the polarity, and enable the hpd interrupts.
 */
void evergreen_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned enabled = 0;
	u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
		DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);

		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
			/* don't try to enable hpd on eDP or LVDS; this avoids
			 * breaking the aux dp channel on imac and helps (but
			 * does not completely fix)
			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
			 * and also avoids interrupt storms during dpms.
			 */
			continue;
		}
		switch (radeon_connector->hpd.hpd) {
		case RADEON_HPD_1:
			WREG32(DC_HPD1_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			WREG32(DC_HPD2_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			WREG32(DC_HPD3_CONTROL, tmp);
			break;
		case RADEON_HPD_4:
			WREG32(DC_HPD4_CONTROL, tmp);
			break;
		case RADEON_HPD_5:
			WREG32(DC_HPD5_CONTROL, tmp);
			break;
		case RADEON_HPD_6:
			WREG32(DC_HPD6_CONTROL, tmp);
			break;
		default:
			break;
		}
		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
		enabled |= 1 << radeon_connector->hpd.hpd;
	}
	radeon_irq_kms_enable_hpd(rdev, enabled);
}

/**
 * evergreen_hpd_fini - hpd tear down callback.
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the hpd pins used by the card (evergreen+).
 * Disable the hpd interrupts.
 */
void evergreen_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned disabled = 0;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		switch (radeon_connector->hpd.hpd) {
		case RADEON_HPD_1:
			WREG32(DC_HPD1_CONTROL, 0);
			break;
		case RADEON_HPD_2:
			WREG32(DC_HPD2_CONTROL, 0);
			break;
		case RADEON_HPD_3:
			WREG32(DC_HPD3_CONTROL, 0);
			break;
		case RADEON_HPD_4:
			WREG32(DC_HPD4_CONTROL, 0);
			break;
		case RADEON_HPD_5:
			WREG32(DC_HPD5_CONTROL, 0);
			break;
		case RADEON_HPD_6:
			WREG32(DC_HPD6_CONTROL, 0);
			break;
		default:
			break;
		}
		disabled |= 1 << radeon_connector->hpd.hpd;
	}
	radeon_irq_kms_disable_hpd(rdev, disabled);
}

/* watermark setup */

static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
					struct radeon_crtc *radeon_crtc,
					struct drm_display_mode *mode,
					struct drm_display_mode *other_mode)
{
	u32 tmp;
	/*
	 * Line Buffer Setup
	 * There are 3 line buffers, each one shared by 2 display controllers.
	 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
	 * the display controllers.  The partitioning is done via one of four
	 * preset allocations specified in bits 2:0:
	 * first display controller
	 *  0 - first half of lb (3840 * 2)
	 *  1 - first 3/4 of lb (5760 * 2)
	 *  2 - whole lb (7680 * 2), other crtc must be disabled
	 *  3 - first 1/4 of lb (1920 * 2)
	 * second display controller
	 *  4 - second half of lb (3840 * 2)
	 *  5 - second 3/4 of lb (5760 * 2)
	 *  6 - whole lb (7680 * 2), other crtc must be disabled
	 *  7 - last 1/4 of lb (1920 * 2)
	 */
	/* this can get tricky if we have two large displays on a paired group
	 * of crtcs.  Ideally for multiple large displays we'd assign them to
	 * non-linked crtcs for maximum line buffer allocation.
	 */
	if (radeon_crtc->base.enabled && mode) {
		if (other_mode)
			tmp = 0; /* 1/2 */
		else
			tmp = 2; /* whole */
	} else
		tmp = 0;

	/* second controller of the pair uses second half of the lb */
	if (radeon_crtc->crtc_id % 2)
		tmp += 4;
	WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp);

	if (radeon_crtc->base.enabled && mode) {
		switch (tmp) {
		case 0:
		case 4:
		default:
			if (ASIC_IS_DCE5(rdev))
				return 4096 * 2;
			else
				return 3840 * 2;
		case 1:
		case 5:
			if (ASIC_IS_DCE5(rdev))
				return 6144 * 2;
			else
				return 5760 * 2;
		case 2:
		case 6:
			if (ASIC_IS_DCE5(rdev))
				return 8192 * 2;
			else
				return 7680 * 2;
		case 3:
		case 7:
			if (ASIC_IS_DCE5(rdev))
				return 2048 * 2;
			else
				return 1920 * 2;
		}
	}

	/* controller not enabled, so no lb used */
	return 0;
}

u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev)
{
	u32 tmp = RREG32(MC_SHARED_CHMAP);

	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		return 1;
	case 1:
		return 2;
	case 2:
		return 4;
	case 3:
		return 8;
	}
}

struct evergreen_wm_params {
	u32 dram_channels; /* number of dram channels */
	u32 yclk;          /* bandwidth per dram data pin in kHz */
	u32 sclk;          /* engine clock in kHz */
	u32 disp_clk;      /* display clock in kHz */
	u32 src_width;     /* viewport width */
	u32 active_time;   /* active display time in ns */
	u32 blank_time;    /* blank time in ns */
	bool interlaced;   /* mode is interlaced */
	fixed20_12 vsc;    /* vertical scale ratio */
	u32 num_heads;     /* number of active crtcs */
	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
	u32 lb_size;       /* line buffer allocated to pipe */
	u32 vtaps;         /* vertical scaler taps */
};
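
/* The watermark helpers below use the drm 20.12 fixed-point routines:
 * dfixed_const() loads an integer, dfixed_mul()/dfixed_div() multiply and
 * divide, and dfixed_trunc() truncates the 20.12 value back to a u32.
 */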

static u32 evergreen_dram_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate DRAM Bandwidth and the part allocated to display. */
	fixed20_12 dram_efficiency; /* 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	dram_efficiency.full = dfixed_const(7);
	dram_efficiency.full = dfixed_div(dram_efficiency, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);

	return dfixed_trunc(bandwidth);
}

static u32 evergreen_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
{
	/* Calculate DRAM Bandwidth and the part allocated to display. */
	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);

	return dfixed_trunc(bandwidth);
}

static u32 evergreen_data_return_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate the display Data return Bandwidth */
	fixed20_12 return_efficiency; /* 0.8 */
	fixed20_12 sclk, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	sclk.full = dfixed_const(wm->sclk);
	sclk.full = dfixed_div(sclk, a);
	a.full = dfixed_const(10);
	return_efficiency.full = dfixed_const(8);
	return_efficiency.full = dfixed_div(return_efficiency, a);
	a.full = dfixed_const(32);
	bandwidth.full = dfixed_mul(a, sclk);
	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);

	return dfixed_trunc(bandwidth);
}

static u32 evergreen_dmif_request_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate the DMIF Request Bandwidth */
	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
	fixed20_12 disp_clk, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	disp_clk.full = dfixed_const(wm->disp_clk);
	disp_clk.full = dfixed_div(disp_clk, a);
	a.full = dfixed_const(10);
	disp_clk_request_efficiency.full = dfixed_const(8);
	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
	a.full = dfixed_const(32);
	bandwidth.full = dfixed_mul(a, disp_clk);
	bandwidth.full = dfixed_mul(bandwidth, disp_clk_request_efficiency);

	return dfixed_trunc(bandwidth);
}

static u32 evergreen_available_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate the Available bandwidth. Display can use this temporarily but not on average. */
	u32 dram_bandwidth = evergreen_dram_bandwidth(wm);
	u32 data_return_bandwidth = evergreen_data_return_bandwidth(wm);
	u32 dmif_req_bandwidth = evergreen_dmif_request_bandwidth(wm);

	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
}

static u32 evergreen_average_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate the display mode Average Bandwidth
	 * DisplayMode should contain the source and destination dimensions,
	 * timing, etc.
	 */
	fixed20_12 bpp;
	fixed20_12 line_time;
	fixed20_12 src_width;
	fixed20_12 bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
	line_time.full = dfixed_div(line_time, a);
	bpp.full = dfixed_const(wm->bytes_per_pixel);
	src_width.full = dfixed_const(wm->src_width);
	bandwidth.full = dfixed_mul(src_width, bpp);
	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
	bandwidth.full = dfixed_div(bandwidth, line_time);

	return dfixed_trunc(bandwidth);
}

static u32 evergreen_latency_watermark(struct evergreen_wm_params *wm)
{
	/* First calculate the latency in ns */
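	/*
	 * Sketch of the model (inferred from the arithmetic below): with
	 * bandwidth in MB/s (bytes per us), (512 * 8 * 1000) / bw is the
	 * time in ns to return a worst-case burst of eight 512-byte chunks,
	 * and the cursor term covers a 128-byte * 4 cursor line-pair fetch
	 * for each additional head.
	 */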
	u32 mc_latency = 2000; /* 2000 ns. */
	u32 available_bandwidth = evergreen_available_bandwidth(wm);
	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
		(wm->num_heads * cursor_line_pair_return_time);
	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
	fixed20_12 a, b, c;

	if (wm->num_heads == 0)
		return 0;

	a.full = dfixed_const(2);
	b.full = dfixed_const(1);
	if ((wm->vsc.full > a.full) ||
	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
	    (wm->vtaps >= 5) ||
	    ((wm->vsc.full >= a.full) && wm->interlaced))
		max_src_lines_per_dst_line = 4;
	else
		max_src_lines_per_dst_line = 2;

	a.full = dfixed_const(available_bandwidth);
	b.full = dfixed_const(wm->num_heads);
	a.full = dfixed_div(a, b);

	b.full = dfixed_const(1000);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(c, b);
	c.full = dfixed_const(wm->bytes_per_pixel);
	b.full = dfixed_mul(b, c);

	lb_fill_bw = min(dfixed_trunc(a), dfixed_trunc(b));

	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
	b.full = dfixed_const(1000);
	c.full = dfixed_const(lb_fill_bw);
	b.full = dfixed_div(c, b);
	a.full = dfixed_div(a, b);
	line_fill_time = dfixed_trunc(a);

	if (line_fill_time < wm->active_time)
		return latency;
	else
		return latency + (line_fill_time - wm->active_time);

}

static bool evergreen_average_bandwidth_vs_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
{
	if (evergreen_average_bandwidth(wm) <=
	    (evergreen_dram_bandwidth_for_display(wm) / wm->num_heads))
		return true;
	else
		return false;
}

static bool evergreen_average_bandwidth_vs_available_bandwidth(struct evergreen_wm_params *wm)
{
	if (evergreen_average_bandwidth(wm) <=
	    (evergreen_available_bandwidth(wm) / wm->num_heads))
		return true;
	else
		return false;
}

static bool evergreen_check_latency_hiding(struct evergreen_wm_params *wm)
{
	u32 lb_partitions = wm->lb_size / wm->src_width;
	u32 line_time = wm->active_time + wm->blank_time;
	u32 latency_tolerant_lines;
	u32 latency_hiding;
	fixed20_12 a;

	a.full = dfixed_const(1);
	if (wm->vsc.full > a.full)
		latency_tolerant_lines = 1;
	else {
		if (lb_partitions <= (wm->vtaps + 1))
			latency_tolerant_lines = 1;
		else
			latency_tolerant_lines = 2;
	}

	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);

	if (evergreen_latency_watermark(wm) <= latency_hiding)
		return true;
	else
		return false;
}

static void evergreen_program_watermarks(struct radeon_device *rdev,
					 struct radeon_crtc *radeon_crtc,
					 u32 lb_size, u32 num_heads)
{
	struct drm_display_mode *mode = &radeon_crtc->base.mode;
	struct evergreen_wm_params wm;
	u32 pixel_period;
	u32 line_time = 0;
	u32 latency_watermark_a = 0, latency_watermark_b = 0;
	u32 priority_a_mark = 0, priority_b_mark = 0;
	u32 priority_a_cnt = PRIORITY_OFF;
	u32 priority_b_cnt = PRIORITY_OFF;
	u32 pipe_offset = radeon_crtc->crtc_id * 16;
	u32 tmp, arb_control3;
	fixed20_12 a, b, c;

	if (radeon_crtc->base.enabled && num_heads && mode) {
		pixel_period = 1000000 / (u32)mode->clock;
		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
		priority_a_cnt = 0;
		priority_b_cnt = 0;

		wm.yclk = rdev->pm.current_mclk * 10;
		wm.sclk = rdev->pm.current_sclk * 10;
		wm.disp_clk = mode->clock;
		wm.src_width = mode->crtc_hdisplay;
		wm.active_time = mode->crtc_hdisplay * pixel_period;
		wm.blank_time = line_time - wm.active_time;
		wm.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm.interlaced = true;
		wm.vsc = radeon_crtc->vsc;
		wm.vtaps = 1;
		if (radeon_crtc->rmx_type != RMX_OFF)
			wm.vtaps = 2;
		wm.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm.lb_size = lb_size;
		wm.dram_channels = evergreen_get_number_of_dram_channels(rdev);
		wm.num_heads = num_heads;

		/* set for high clocks */
		latency_watermark_a = min(evergreen_latency_watermark(&wm), (u32)65535);
		/* set for low clocks */
		/* wm.yclk = low clk; wm.sclk = low clk */
		latency_watermark_b = min(evergreen_latency_watermark(&wm), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!evergreen_average_bandwidth_vs_dram_bandwidth_for_display(&wm) ||
		    !evergreen_average_bandwidth_vs_available_bandwidth(&wm) ||
		    !evergreen_check_latency_hiding(&wm) ||
		    (rdev->disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
			priority_a_cnt |= PRIORITY_ALWAYS_ON;
			priority_b_cnt |= PRIORITY_ALWAYS_ON;
		}

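		/* convert the watermark latency into a priority mark, i.e.
		 * pixels fetched during the latency window: latency(ns) *
		 * pclk(MHz) / 1000, scaled by the horizontal scale ratio and
		 * expressed in units of 16 pixels (sketch of the math below)
		 */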
		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_a);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, radeon_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_a_mark = dfixed_trunc(c);
		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;

		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_b);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, radeon_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_b_mark = dfixed_trunc(c);
		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
	}

	/* select wm A */
	arb_control3 = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
	tmp = arb_control3;
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(1);
	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
	WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
	       (LATENCY_LOW_WATERMARK(latency_watermark_a) |
		LATENCY_HIGH_WATERMARK(line_time)));
	/* select wm B */
	tmp = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(2);
	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
	WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
	       (LATENCY_LOW_WATERMARK(latency_watermark_b) |
		LATENCY_HIGH_WATERMARK(line_time)));
	/* restore original selection */
	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, arb_control3);

	/* write the priority marks */
	WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt);
	WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt);

}

/**
 * evergreen_bandwidth_update - update display watermarks callback.
 *
 * @rdev: radeon_device pointer
 *
 * Update the display watermarks based on the requested mode(s)
 * (evergreen+).
 */
void evergreen_bandwidth_update(struct radeon_device *rdev)
{
	struct drm_display_mode *mode0 = NULL;
	struct drm_display_mode *mode1 = NULL;
	u32 num_heads = 0, lb_size;
	int i;

	radeon_update_display_priority(rdev);

	for (i = 0; i < rdev->num_crtc; i++) {
		if (rdev->mode_info.crtcs[i]->base.enabled)
			num_heads++;
	}
	for (i = 0; i < rdev->num_crtc; i += 2) {
		mode0 = &rdev->mode_info.crtcs[i]->base.mode;
		mode1 = &rdev->mode_info.crtcs[i+1]->base.mode;
		lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1);
		evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
		lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0);
		evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads);
	}
}

/**
 * evergreen_mc_wait_for_idle - wait for MC idle callback.
 *
 * @rdev: radeon_device pointer
 *
 * Wait for the MC (memory controller) to be idle.
 * (evergreen+).
 * Returns 0 if the MC is idle, -1 if not.
 */
int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* poll the MC busy bits in SRBM_STATUS */
		tmp = RREG32(SRBM_STATUS) & 0x1F00;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}

/*
 * GART
 */
void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
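	/* kick a request on VM context0 and poll until the hardware posts a
	 * response; judging by the error path below, 2 means the flush
	 * failed and any other non-zero response means it completed
	 */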
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* poll the RESPONSE_TYPE field */
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		if (tmp == 2) {
			DRM_ERROR("[drm] r600 flush TLB failed\n");
			return;
		}
		if (tmp) {
			return;
		}
		udelay(1);
	}
}

static int evergreen_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	if (rdev->flags & RADEON_IS_IGP) {
		WREG32(FUS_MC_VM_MD_L1_TLB0_CNTL, tmp);
		WREG32(FUS_MC_VM_MD_L1_TLB1_CNTL, tmp);
		WREG32(FUS_MC_VM_MD_L1_TLB2_CNTL, tmp);
	} else {
		WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
		WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
		WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
		if ((rdev->family == CHIP_JUNIPER) ||
		    (rdev->family == CHIP_CYPRESS) ||
		    (rdev->family == CHIP_HEMLOCK) ||
		    (rdev->family == CHIP_BARTS))
			WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp);
	}
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT1_CNTL, 0);

	evergreen_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}

static void evergreen_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;

	/* Disable all tables */
	WREG32(VM_CONTEXT0_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	radeon_gart_table_vram_unpin(rdev);
}

static void evergreen_pcie_gart_fini(struct radeon_device *rdev)
{
	evergreen_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}


static void evergreen_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	WREG32(VM_CONTEXT0_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);
}

void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
{
	u32 crtc_enabled, tmp, frame_count, blackout;
	int i, j;

	save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
	save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);

	/* disable VGA render */
	WREG32(VGA_RENDER_CONTROL, 0);
	/* blank the display controllers */
	for (i = 0; i < rdev->num_crtc; i++) {
		crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN;
		if (crtc_enabled) {
			save->crtc_enabled[i] = true;
			if (ASIC_IS_DCE6(rdev)) {
				tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
				if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
					radeon_wait_for_vblank(rdev, i);
					WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
					tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
					WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
				}
			} else {
				tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
				if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) {
					radeon_wait_for_vblank(rdev, i);
					WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
					tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
					WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
					WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
				}
			}
			/* wait for the next frame */
			frame_count = radeon_get_vblank_counter(rdev, i);
			for (j = 0; j < rdev->usec_timeout; j++) {
				if (radeon_get_vblank_counter(rdev, i) != frame_count)
					break;
				udelay(1);
			}

			/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
			WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
			tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
			tmp &= ~EVERGREEN_CRTC_MASTER_EN;
			WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
			WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			save->crtc_enabled[i] = false;
		} else {
			save->crtc_enabled[i] = false;
		}
	}

	radeon_mc_wait_for_idle(rdev);

	blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
	if ((blackout & BLACKOUT_MODE_MASK) != 1) {
		/* Block CPU access */
		WREG32(BIF_FB_EN, 0);
		/* blackout the MC */
		blackout &= ~BLACKOUT_MODE_MASK;
		WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
	}
	/* wait for the MC to settle */
	udelay(100);

	/* lock double buffered regs */
	for (i = 0; i < rdev->num_crtc; i++) {
		if (save->crtc_enabled[i]) {
			tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
			if (!(tmp & EVERGREEN_GRPH_UPDATE_LOCK)) {
				tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
				WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
			}
			tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
			if (!(tmp & 1)) {
				tmp |= 1;
				WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
			}
		}
	}
}

void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
{
	u32 tmp, frame_count;
	int i, j;

	/* update crtc base addresses */
	for (i = 0; i < rdev->num_crtc; i++) {
		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
		       upper_32_bits(rdev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
		       upper_32_bits(rdev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
		       (u32)rdev->mc.vram_start);
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
		       (u32)rdev->mc.vram_start);
	}
	WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);

	/* unlock regs and wait for update */
	for (i = 0; i < rdev->num_crtc; i++) {
		if (save->crtc_enabled[i]) {
			tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]);
			if ((tmp & 0x3) != 0) {
				tmp &= ~0x3;
				WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
			}
			tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
			if (tmp & EVERGREEN_GRPH_UPDATE_LOCK) {
				tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
				WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
			}
			tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
			if (tmp & 1) {
				tmp &= ~1;
				WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
			}
			for (j = 0; j < rdev->usec_timeout; j++) {
				tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
				if ((tmp & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING) == 0)
					break;
				udelay(1);
			}
		}
	}

	/* unblackout the MC */
	tmp = RREG32(MC_SHARED_BLACKOUT_CNTL);
	tmp &= ~BLACKOUT_MODE_MASK;
	WREG32(MC_SHARED_BLACKOUT_CNTL, tmp);
	/* allow CPU access */
	WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);

	for (i = 0; i < rdev->num_crtc; i++) {
		if (save->crtc_enabled[i]) {
			if (ASIC_IS_DCE6(rdev)) {
				tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
				tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
				WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			} else {
				tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
				tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
				WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			}
			/* wait for the next frame */
			frame_count = radeon_get_vblank_counter(rdev, i);
			for (j = 0; j < rdev->usec_timeout; j++) {
				if (radeon_get_vblank_counter(rdev, i) != frame_count)
					break;
				udelay(1);
			}
		}
	}
	/* Unlock vga access */
	WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
	mdelay(1);
	WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
}

void evergreen_mc_program(struct radeon_device *rdev)
{
	struct evergreen_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
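	/* the loop below zeroes five registers in each of 32 slots at a
	 * 0x18 stride; these are assumed to be the per-client HDP surface
	 * registers inherited from the r600-family layout
	 */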
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	evergreen_mc_stop(rdev, &save);
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Lockout access through VGA aperture */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
			rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
	/* llano/ontario only */
	if ((rdev->family == CHIP_PALM) ||
	    (rdev->family == CHIP_SUMO) ||
	    (rdev->family == CHIP_SUMO2)) {
		tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF;
		tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24;
		tmp |= ((rdev->mc.vram_start >> 20) & 0xF) << 20;
		WREG32(MC_FUS_VM_FB_OFFSET, tmp);
	}
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	evergreen_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it from overwriting our objects */
	rv515_vga_render_disable(rdev);
}

/*
 * CP.
 */
void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];
	u32 next_rptr;

	/* set to DX10/11 mode */
	radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
	radeon_ring_write(ring, 1);

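	/* next_rptr accounts for the bookkeeping packet emitted below (3
	 * dwords for SET_CONFIG_REG, 5 for MEM_WRITE) plus the 4-dword
	 * INDIRECT_BUFFER packet emitted at the end of this function
	 */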
1612	if (ring->rptr_save_reg) {
1613		next_rptr = ring->wptr + 3 + 4;
1614		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
1615		radeon_ring_write(ring, ((ring->rptr_save_reg -
1616					  PACKET3_SET_CONFIG_REG_START) >> 2));
1617		radeon_ring_write(ring, next_rptr);
1618	} else if (rdev->wb.enabled) {
1619		next_rptr = ring->wptr + 5 + 4;
1620		radeon_ring_write(ring, PACKET3(PACKET3_MEM_WRITE, 3));
1621		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
1622		radeon_ring_write(ring, (upper_32_bits(ring->next_rptr_gpu_addr) & 0xff) | (1 << 18));
1623		radeon_ring_write(ring, next_rptr);
1624		radeon_ring_write(ring, 0);
1625	}
1626
1627	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
1628	radeon_ring_write(ring,
1629#ifdef __BIG_ENDIAN
1630			  (2 << 0) |
1631#endif
1632			  (ib->gpu_addr & 0xFFFFFFFC));
1633	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
1634	radeon_ring_write(ring, ib->length_dw);
1635}
1636
1637
1638static int evergreen_cp_load_microcode(struct radeon_device *rdev)
1639{
1640	const __be32 *fw_data;
1641	int i;
1642
1643	if (!rdev->me_fw || !rdev->pfp_fw)
1644		return -EINVAL;
1645
1646	r700_cp_stop(rdev);
1647	WREG32(CP_RB_CNTL,
1648#ifdef __BIG_ENDIAN
1649	       BUF_SWAP_32BIT |
1650#endif
1651	       RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
1652
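	/* Both engines load microcode through an address/data register
	 * pair: zero the write address, stream one big-endian dword per
	 * data write (the address presumably auto-increments), then zero
	 * the address again so execution starts at instruction 0. */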
1653	fw_data = (const __be32 *)rdev->pfp_fw->data;
1654	WREG32(CP_PFP_UCODE_ADDR, 0);
1655	for (i = 0; i < EVERGREEN_PFP_UCODE_SIZE; i++)
1656		WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
1657	WREG32(CP_PFP_UCODE_ADDR, 0);
1658
1659	fw_data = (const __be32 *)rdev->me_fw->data;
1660	WREG32(CP_ME_RAM_WADDR, 0);
1661	for (i = 0; i < EVERGREEN_PM4_UCODE_SIZE; i++)
1662		WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
1663
1664	WREG32(CP_PFP_UCODE_ADDR, 0);
1665	WREG32(CP_ME_RAM_WADDR, 0);
1666	WREG32(CP_ME_RAM_RADDR, 0);
1667	return 0;
1668}
1669
1670static int evergreen_cp_start(struct radeon_device *rdev)
1671{
1672	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
1673	int r, i;
1674	uint32_t cp_me;
1675
1676	r = radeon_ring_lock(rdev, ring, 7);
1677	if (r) {
1678		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
1679		return r;
1680	}
1681	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
1682	radeon_ring_write(ring, 0x1);
1683	radeon_ring_write(ring, 0x0);
1684	radeon_ring_write(ring, rdev->config.evergreen.max_hw_contexts - 1);
1685	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
1686	radeon_ring_write(ring, 0);
1687	radeon_ring_write(ring, 0);
1688	radeon_ring_unlock_commit(rdev, ring);
1689
1690	cp_me = 0xff;
1691	WREG32(CP_ME_CNTL, cp_me);
1692
1693	r = radeon_ring_lock(rdev, ring, evergreen_default_size + 19);
1694	if (r) {
1695		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
1696		return r;
1697	}
1698
1699	/* setup clear context state */
1700	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1701	radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
1702
1703	for (i = 0; i < evergreen_default_size; i++)
1704		radeon_ring_write(ring, evergreen_default_state[i]);
1705
1706	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1707	radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
1708
1709	/* set clear context state */
1710	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
1711	radeon_ring_write(ring, 0);
1712
1713	/* SQ_VTX_BASE_VTX_LOC */
1714	radeon_ring_write(ring, 0xc0026f00);
1715	radeon_ring_write(ring, 0x00000000);
1716	radeon_ring_write(ring, 0x00000000);
1717	radeon_ring_write(ring, 0x00000000);
1718
1719	/* Clear consts */
1720	radeon_ring_write(ring, 0xc0036f00);
1721	radeon_ring_write(ring, 0x00000bc4);
1722	radeon_ring_write(ring, 0xffffffff);
1723	radeon_ring_write(ring, 0xffffffff);
1724	radeon_ring_write(ring, 0xffffffff);
1725
1726	radeon_ring_write(ring, 0xc0026900);
1727	radeon_ring_write(ring, 0x00000316);
1728	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
1729	radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
1730
1731	radeon_ring_unlock_commit(rdev, ring);
1732
1733	return 0;
1734}
1735
1736static int evergreen_cp_resume(struct radeon_device *rdev)
1737{
1738	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
1739	u32 tmp;
1740	u32 rb_bufsz;
1741	int r;
1742
1743	/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
1744	WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
1745				 SOFT_RESET_PA |
1746				 SOFT_RESET_SH |
1747				 SOFT_RESET_VGT |
1748				 SOFT_RESET_SPI |
1749				 SOFT_RESET_SX));
1750	RREG32(GRBM_SOFT_RESET);
1751	mdelay(15);
1752	WREG32(GRBM_SOFT_RESET, 0);
1753	RREG32(GRBM_SOFT_RESET);
1754
1755	/* Set ring buffer size */
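	/* drm_order() is log2 here (ring sizes are powers of two); e.g.
	 * for a hypothetical 1MB ring, rb_bufsz = drm_order(1048576 / 8)
	 * = 17, and the block-size field is drm_order(4096 / 8) = 9. */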
1756	rb_bufsz = drm_order(ring->ring_size / 8);
1757	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
1758#ifdef __BIG_ENDIAN
1759	tmp |= BUF_SWAP_32BIT;
1760#endif
1761	WREG32(CP_RB_CNTL, tmp);
1762	WREG32(CP_SEM_WAIT_TIMER, 0x0);
1763	WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
1764
1765	/* Set the write pointer delay */
1766	WREG32(CP_RB_WPTR_DELAY, 0);
1767
1768	/* Initialize the ring buffer's read and write pointers */
1769	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
1770	WREG32(CP_RB_RPTR_WR, 0);
1771	ring->wptr = 0;
1772	WREG32(CP_RB_WPTR, ring->wptr);
1773
1774	/* set the wb address whether it's enabled or not */
1775	WREG32(CP_RB_RPTR_ADDR,
1776	       ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
1777	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
1778	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
1779
1780	if (rdev->wb.enabled)
1781		WREG32(SCRATCH_UMSK, 0xff);
1782	else {
1783		tmp |= RB_NO_UPDATE;
1784		WREG32(SCRATCH_UMSK, 0);
1785	}
1786
1787	mdelay(1);
1788	WREG32(CP_RB_CNTL, tmp);
1789
1790	WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
1791	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
1792
1793	ring->rptr = RREG32(CP_RB_RPTR);
1794
1795	evergreen_cp_start(rdev);
1796	ring->ready = true;
1797	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
1798	if (r) {
1799		ring->ready = false;
1800		return r;
1801	}
1802	return 0;
1803}
1804
1805/*
1806 * Core functions
1807 */
1808static void evergreen_gpu_init(struct radeon_device *rdev)
1809{
1810	u32 gb_addr_config;
1811	u32 mc_shared_chmap, mc_arb_ramcfg;
1812	u32 sx_debug_1;
1813	u32 smx_dc_ctl0;
1814	u32 sq_config;
1815	u32 sq_lds_resource_mgmt;
1816	u32 sq_gpr_resource_mgmt_1;
1817	u32 sq_gpr_resource_mgmt_2;
1818	u32 sq_gpr_resource_mgmt_3;
1819	u32 sq_thread_resource_mgmt;
1820	u32 sq_thread_resource_mgmt_2;
1821	u32 sq_stack_resource_mgmt_1;
1822	u32 sq_stack_resource_mgmt_2;
1823	u32 sq_stack_resource_mgmt_3;
1824	u32 vgt_cache_invalidation;
1825	u32 hdp_host_path_cntl, tmp;
1826	u32 disabled_rb_mask;
1827	int i, j, num_shader_engines, ps_thread_count;
1828
1829	switch (rdev->family) {
1830	case CHIP_CYPRESS:
1831	case CHIP_HEMLOCK:
1832		rdev->config.evergreen.num_ses = 2;
1833		rdev->config.evergreen.max_pipes = 4;
1834		rdev->config.evergreen.max_tile_pipes = 8;
1835		rdev->config.evergreen.max_simds = 10;
1836		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
1837		rdev->config.evergreen.max_gprs = 256;
1838		rdev->config.evergreen.max_threads = 248;
1839		rdev->config.evergreen.max_gs_threads = 32;
1840		rdev->config.evergreen.max_stack_entries = 512;
1841		rdev->config.evergreen.sx_num_of_sets = 4;
1842		rdev->config.evergreen.sx_max_export_size = 256;
1843		rdev->config.evergreen.sx_max_export_pos_size = 64;
1844		rdev->config.evergreen.sx_max_export_smx_size = 192;
1845		rdev->config.evergreen.max_hw_contexts = 8;
1846		rdev->config.evergreen.sq_num_cf_insts = 2;
1847
1848		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
1849		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1850		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1851		gb_addr_config = CYPRESS_GB_ADDR_CONFIG_GOLDEN;
1852		break;
1853	case CHIP_JUNIPER:
1854		rdev->config.evergreen.num_ses = 1;
1855		rdev->config.evergreen.max_pipes = 4;
1856		rdev->config.evergreen.max_tile_pipes = 4;
1857		rdev->config.evergreen.max_simds = 10;
1858		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
1859		rdev->config.evergreen.max_gprs = 256;
1860		rdev->config.evergreen.max_threads = 248;
1861		rdev->config.evergreen.max_gs_threads = 32;
1862		rdev->config.evergreen.max_stack_entries = 512;
1863		rdev->config.evergreen.sx_num_of_sets = 4;
1864		rdev->config.evergreen.sx_max_export_size = 256;
1865		rdev->config.evergreen.sx_max_export_pos_size = 64;
1866		rdev->config.evergreen.sx_max_export_smx_size = 192;
1867		rdev->config.evergreen.max_hw_contexts = 8;
1868		rdev->config.evergreen.sq_num_cf_insts = 2;
1869
1870		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
1871		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1872		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1873		gb_addr_config = JUNIPER_GB_ADDR_CONFIG_GOLDEN;
1874		break;
1875	case CHIP_REDWOOD:
1876		rdev->config.evergreen.num_ses = 1;
1877		rdev->config.evergreen.max_pipes = 4;
1878		rdev->config.evergreen.max_tile_pipes = 4;
1879		rdev->config.evergreen.max_simds = 5;
1880		rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
1881		rdev->config.evergreen.max_gprs = 256;
1882		rdev->config.evergreen.max_threads = 248;
1883		rdev->config.evergreen.max_gs_threads = 32;
1884		rdev->config.evergreen.max_stack_entries = 256;
1885		rdev->config.evergreen.sx_num_of_sets = 4;
1886		rdev->config.evergreen.sx_max_export_size = 256;
1887		rdev->config.evergreen.sx_max_export_pos_size = 64;
1888		rdev->config.evergreen.sx_max_export_smx_size = 192;
1889		rdev->config.evergreen.max_hw_contexts = 8;
1890		rdev->config.evergreen.sq_num_cf_insts = 2;
1891
1892		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
1893		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1894		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1895		gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
1896		break;
1897	case CHIP_CEDAR:
1898	default:
1899		rdev->config.evergreen.num_ses = 1;
1900		rdev->config.evergreen.max_pipes = 2;
1901		rdev->config.evergreen.max_tile_pipes = 2;
1902		rdev->config.evergreen.max_simds = 2;
1903		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
1904		rdev->config.evergreen.max_gprs = 256;
1905		rdev->config.evergreen.max_threads = 192;
1906		rdev->config.evergreen.max_gs_threads = 16;
1907		rdev->config.evergreen.max_stack_entries = 256;
1908		rdev->config.evergreen.sx_num_of_sets = 4;
1909		rdev->config.evergreen.sx_max_export_size = 128;
1910		rdev->config.evergreen.sx_max_export_pos_size = 32;
1911		rdev->config.evergreen.sx_max_export_smx_size = 96;
1912		rdev->config.evergreen.max_hw_contexts = 4;
1913		rdev->config.evergreen.sq_num_cf_insts = 1;
1914
1915		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
1916		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1917		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1918		gb_addr_config = CEDAR_GB_ADDR_CONFIG_GOLDEN;
1919		break;
1920	case CHIP_PALM:
1921		rdev->config.evergreen.num_ses = 1;
1922		rdev->config.evergreen.max_pipes = 2;
1923		rdev->config.evergreen.max_tile_pipes = 2;
1924		rdev->config.evergreen.max_simds = 2;
1925		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
1926		rdev->config.evergreen.max_gprs = 256;
1927		rdev->config.evergreen.max_threads = 192;
1928		rdev->config.evergreen.max_gs_threads = 16;
1929		rdev->config.evergreen.max_stack_entries = 256;
1930		rdev->config.evergreen.sx_num_of_sets = 4;
1931		rdev->config.evergreen.sx_max_export_size = 128;
1932		rdev->config.evergreen.sx_max_export_pos_size = 32;
1933		rdev->config.evergreen.sx_max_export_smx_size = 96;
1934		rdev->config.evergreen.max_hw_contexts = 4;
1935		rdev->config.evergreen.sq_num_cf_insts = 1;
1936
1937		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
1938		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1939		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1940		gb_addr_config = CEDAR_GB_ADDR_CONFIG_GOLDEN;
1941		break;
1942	case CHIP_SUMO:
1943		rdev->config.evergreen.num_ses = 1;
1944		rdev->config.evergreen.max_pipes = 4;
1945		rdev->config.evergreen.max_tile_pipes = 4;
1946		if (rdev->ddev->pci_device == 0x9648)
1947			rdev->config.evergreen.max_simds = 3;
1948		else if ((rdev->ddev->pci_device == 0x9647) ||
1949			 (rdev->ddev->pci_device == 0x964a))
1950			rdev->config.evergreen.max_simds = 4;
1951		else
1952			rdev->config.evergreen.max_simds = 5;
1953		rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
1954		rdev->config.evergreen.max_gprs = 256;
1955		rdev->config.evergreen.max_threads = 248;
1956		rdev->config.evergreen.max_gs_threads = 32;
1957		rdev->config.evergreen.max_stack_entries = 256;
1958		rdev->config.evergreen.sx_num_of_sets = 4;
1959		rdev->config.evergreen.sx_max_export_size = 256;
1960		rdev->config.evergreen.sx_max_export_pos_size = 64;
1961		rdev->config.evergreen.sx_max_export_smx_size = 192;
1962		rdev->config.evergreen.max_hw_contexts = 8;
1963		rdev->config.evergreen.sq_num_cf_insts = 2;
1964
1965		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
1966		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1967		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1968		gb_addr_config = SUMO_GB_ADDR_CONFIG_GOLDEN;
1969		break;
1970	case CHIP_SUMO2:
1971		rdev->config.evergreen.num_ses = 1;
1972		rdev->config.evergreen.max_pipes = 4;
1973		rdev->config.evergreen.max_tile_pipes = 4;
1974		rdev->config.evergreen.max_simds = 2;
1975		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
1976		rdev->config.evergreen.max_gprs = 256;
1977		rdev->config.evergreen.max_threads = 248;
1978		rdev->config.evergreen.max_gs_threads = 32;
1979		rdev->config.evergreen.max_stack_entries = 512;
1980		rdev->config.evergreen.sx_num_of_sets = 4;
1981		rdev->config.evergreen.sx_max_export_size = 256;
1982		rdev->config.evergreen.sx_max_export_pos_size = 64;
1983		rdev->config.evergreen.sx_max_export_smx_size = 192;
1984		rdev->config.evergreen.max_hw_contexts = 8;
1985		rdev->config.evergreen.sq_num_cf_insts = 2;
1986
1987		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
1988		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1989		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1990		gb_addr_config = SUMO2_GB_ADDR_CONFIG_GOLDEN;
1991		break;
1992	case CHIP_BARTS:
1993		rdev->config.evergreen.num_ses = 2;
1994		rdev->config.evergreen.max_pipes = 4;
1995		rdev->config.evergreen.max_tile_pipes = 8;
1996		rdev->config.evergreen.max_simds = 7;
1997		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
1998		rdev->config.evergreen.max_gprs = 256;
1999		rdev->config.evergreen.max_threads = 248;
2000		rdev->config.evergreen.max_gs_threads = 32;
2001		rdev->config.evergreen.max_stack_entries = 512;
2002		rdev->config.evergreen.sx_num_of_sets = 4;
2003		rdev->config.evergreen.sx_max_export_size = 256;
2004		rdev->config.evergreen.sx_max_export_pos_size = 64;
2005		rdev->config.evergreen.sx_max_export_smx_size = 192;
2006		rdev->config.evergreen.max_hw_contexts = 8;
2007		rdev->config.evergreen.sq_num_cf_insts = 2;
2008
2009		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
2010		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
2011		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
2012		gb_addr_config = BARTS_GB_ADDR_CONFIG_GOLDEN;
2013		break;
2014	case CHIP_TURKS:
2015		rdev->config.evergreen.num_ses = 1;
2016		rdev->config.evergreen.max_pipes = 4;
2017		rdev->config.evergreen.max_tile_pipes = 4;
2018		rdev->config.evergreen.max_simds = 6;
2019		rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
2020		rdev->config.evergreen.max_gprs = 256;
2021		rdev->config.evergreen.max_threads = 248;
2022		rdev->config.evergreen.max_gs_threads = 32;
2023		rdev->config.evergreen.max_stack_entries = 256;
2024		rdev->config.evergreen.sx_num_of_sets = 4;
2025		rdev->config.evergreen.sx_max_export_size = 256;
2026		rdev->config.evergreen.sx_max_export_pos_size = 64;
2027		rdev->config.evergreen.sx_max_export_smx_size = 192;
2028		rdev->config.evergreen.max_hw_contexts = 8;
2029		rdev->config.evergreen.sq_num_cf_insts = 2;
2030
2031		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
2032		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
2033		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
2034		gb_addr_config = TURKS_GB_ADDR_CONFIG_GOLDEN;
2035		break;
2036	case CHIP_CAICOS:
2037		rdev->config.evergreen.num_ses = 1;
2038		rdev->config.evergreen.max_pipes = 2;
2039		rdev->config.evergreen.max_tile_pipes = 2;
2040		rdev->config.evergreen.max_simds = 2;
2041		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
2042		rdev->config.evergreen.max_gprs = 256;
2043		rdev->config.evergreen.max_threads = 192;
2044		rdev->config.evergreen.max_gs_threads = 16;
2045		rdev->config.evergreen.max_stack_entries = 256;
2046		rdev->config.evergreen.sx_num_of_sets = 4;
2047		rdev->config.evergreen.sx_max_export_size = 128;
2048		rdev->config.evergreen.sx_max_export_pos_size = 32;
2049		rdev->config.evergreen.sx_max_export_smx_size = 96;
2050		rdev->config.evergreen.max_hw_contexts = 4;
2051		rdev->config.evergreen.sq_num_cf_insts = 1;
2052
2053		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
2054		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
2055		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
2056		gb_addr_config = CAICOS_GB_ADDR_CONFIG_GOLDEN;
2057		break;
2058	}
2059
2060	/* Initialize HDP */
2061	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
2062		WREG32((0x2c14 + j), 0x00000000);
2063		WREG32((0x2c18 + j), 0x00000000);
2064		WREG32((0x2c1c + j), 0x00000000);
2065		WREG32((0x2c20 + j), 0x00000000);
2066		WREG32((0x2c24 + j), 0x00000000);
2067	}
2068
2069	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
2070
2071	evergreen_fix_pci_max_read_req_size(rdev);
2072
2073	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
2074	if ((rdev->family == CHIP_PALM) ||
2075	    (rdev->family == CHIP_SUMO) ||
2076	    (rdev->family == CHIP_SUMO2))
2077		mc_arb_ramcfg = RREG32(FUS_MC_ARB_RAMCFG);
2078	else
2079		mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
2080
2081	/* setup tiling info dword.  gb_addr_config is not adequate since it does
2082	 * not have bank info, so create a custom tiling dword.
2083	 * bits 3:0   num_pipes
2084	 * bits 7:4   num_banks
2085	 * bits 11:8  group_size
2086	 * bits 15:12 row_size
2087	 */
2088	rdev->config.evergreen.tile_config = 0;
2089	switch (rdev->config.evergreen.max_tile_pipes) {
2090	case 1:
2091	default:
2092		rdev->config.evergreen.tile_config |= (0 << 0);
2093		break;
2094	case 2:
2095		rdev->config.evergreen.tile_config |= (1 << 0);
2096		break;
2097	case 4:
2098		rdev->config.evergreen.tile_config |= (2 << 0);
2099		break;
2100	case 8:
2101		rdev->config.evergreen.tile_config |= (3 << 0);
2102		break;
2103	}
2104	/* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
2105	if (rdev->flags & RADEON_IS_IGP)
2106		rdev->config.evergreen.tile_config |= 1 << 4;
2107	else {
2108		switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
2109		case 0: /* four banks */
2110			rdev->config.evergreen.tile_config |= 0 << 4;
2111			break;
2112		case 1: /* eight banks */
2113			rdev->config.evergreen.tile_config |= 1 << 4;
2114			break;
2115		case 2: /* sixteen banks */
2116		default:
2117			rdev->config.evergreen.tile_config |= 2 << 4;
2118			break;
2119		}
2120	}
2121	rdev->config.evergreen.tile_config |= 0 << 8;
2122	rdev->config.evergreen.tile_config |=
2123		((gb_addr_config & 0x30000000) >> 28) << 12;
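	/* Worked example (hypothetical Cypress-class board): 8 tile pipes
	 * -> 3 in bits 3:0, 8 banks -> 1 in bits 7:4, group_size field 0,
	 * row_size 2 from GB_ADDR_CONFIG bits 29:28 -> tile_config =
	 * 0x2013. */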
2124
2125	num_shader_engines = ((gb_addr_config & NUM_SHADER_ENGINES(3)) >> 12) + 1;
2126
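	/* On the discrete Evergreen parts (Cedar through Hemlock) the
	 * disabled render-backend mask comes from efuse straps; on the
	 * fusion/NI-derived parts it is accumulated 4 bits per shader
	 * engine from CC_RB_BACKEND_DISABLE, earlier SEs shifted up. */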
2127	if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK)) {
2128		u32 efuse_straps_4;
2129		u32 efuse_straps_3;
2130
2131		WREG32(RCU_IND_INDEX, 0x204);
2132		efuse_straps_4 = RREG32(RCU_IND_DATA);
2133		WREG32(RCU_IND_INDEX, 0x203);
2134		efuse_straps_3 = RREG32(RCU_IND_DATA);
2135		tmp = (((efuse_straps_4 & 0xf) << 4) |
2136		      ((efuse_straps_3 & 0xf0000000) >> 28));
2137	} else {
2138		tmp = 0;
2139		for (i = (rdev->config.evergreen.num_ses - 1); i >= 0; i--) {
2140			u32 rb_disable_bitmap;
2141
2142			WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
2143			WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
2144			rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16;
2145			tmp <<= 4;
2146			tmp |= rb_disable_bitmap;
2147		}
2148	}
2149	/* enabled RBs are just the ones not disabled :) */
2150	disabled_rb_mask = tmp;
2151
2152	WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
2153	WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
2154
2155	WREG32(GB_ADDR_CONFIG, gb_addr_config);
2156	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
2157	WREG32(HDP_ADDR_CONFIG, gb_addr_config);
2158	WREG32(DMA_TILING_CONFIG, gb_addr_config);
2159
2160	if ((rdev->config.evergreen.max_backends == 1) &&
2161	    (rdev->flags & RADEON_IS_IGP)) {
2162		if ((disabled_rb_mask & 3) == 1) {
2163			/* RB0 disabled, RB1 enabled */
2164			tmp = 0x11111111;
2165		} else {
2166			/* RB1 disabled, RB0 enabled */
2167			tmp = 0x00000000;
2168		}
2169	} else {
2170		tmp = gb_addr_config & NUM_PIPES_MASK;
2171		tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
2172						EVERGREEN_MAX_BACKENDS, disabled_rb_mask);
2173	}
2174	WREG32(GB_BACKEND_MAP, tmp);
2175
2176	WREG32(CGTS_SYS_TCC_DISABLE, 0);
2177	WREG32(CGTS_TCC_DISABLE, 0);
2178	WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
2179	WREG32(CGTS_USER_TCC_DISABLE, 0);
2180
2181	/* set HW defaults for 3D engine */
2182	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
2183				     ROQ_IB2_START(0x2b)));
2184
2185	WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));
2186
2187	WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO |
2188			     SYNC_GRADIENT |
2189			     SYNC_WALKER |
2190			     SYNC_ALIGNER));
2191
2192	sx_debug_1 = RREG32(SX_DEBUG_1);
2193	sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
2194	WREG32(SX_DEBUG_1, sx_debug_1);
2195
2196
2197	smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
2198	smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
2199	smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets);
2200	WREG32(SMX_DC_CTL0, smx_dc_ctl0);
2201
2202	if (rdev->family <= CHIP_SUMO2)
2203		WREG32(SMX_SAR_CTL0, 0x00010000);
2204
2205	WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) |
2206					POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) |
2207					SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1)));
2208
2209	WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.evergreen.sc_prim_fifo_size) |
2210				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_hiz_tile_fifo_size) |
2211				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_earlyz_tile_fifo_size)));
2212
2213	WREG32(VGT_NUM_INSTANCES, 1);
2214	WREG32(SPI_CONFIG_CNTL, 0);
2215	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
2216	WREG32(CP_PERFMON_CNTL, 0);
2217
2218	WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.evergreen.sq_num_cf_insts) |
2219				  FETCH_FIFO_HIWATER(0x4) |
2220				  DONE_FIFO_HIWATER(0xe0) |
2221				  ALU_UPDATE_FIFO_HIWATER(0x8)));
2222
2223	sq_config = RREG32(SQ_CONFIG);
2224	sq_config &= ~(PS_PRIO(3) |
2225		       VS_PRIO(3) |
2226		       GS_PRIO(3) |
2227		       ES_PRIO(3));
2228	sq_config |= (VC_ENABLE |
2229		      EXPORT_SRC_C |
2230		      PS_PRIO(0) |
2231		      VS_PRIO(1) |
2232		      GS_PRIO(2) |
2233		      ES_PRIO(3));
2234
2235	switch (rdev->family) {
2236	case CHIP_CEDAR:
2237	case CHIP_PALM:
2238	case CHIP_SUMO:
2239	case CHIP_SUMO2:
2240	case CHIP_CAICOS:
2241		/* no vertex cache */
2242		sq_config &= ~VC_ENABLE;
2243		break;
2244	default:
2245		break;
2246	}
2247
2248	sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT);
2249
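	/* GPR split: 2 * 4 clause-temp GPRs are reserved, the remainder
	 * is divided 12/6/4/4/3/3 (out of 32) between PS/VS/GS/ES/HS/LS.
	 * E.g. with max_gprs = 256: 248 usable, PS = 248 * 12 / 32 = 93,
	 * VS = 46, GS = ES = 31, HS = LS = 23. */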
2250	sq_gpr_resource_mgmt_1 = NUM_PS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2))* 12 / 32);
2251	sq_gpr_resource_mgmt_1 |= NUM_VS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 6 / 32);
2252	sq_gpr_resource_mgmt_1 |= NUM_CLAUSE_TEMP_GPRS(4);
2253	sq_gpr_resource_mgmt_2 = NUM_GS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
2254	sq_gpr_resource_mgmt_2 |= NUM_ES_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
2255	sq_gpr_resource_mgmt_3 = NUM_HS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
2256	sq_gpr_resource_mgmt_3 |= NUM_LS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
2257
2258	switch (rdev->family) {
2259	case CHIP_CEDAR:
2260	case CHIP_PALM:
2261	case CHIP_SUMO:
2262	case CHIP_SUMO2:
2263		ps_thread_count = 96;
2264		break;
2265	default:
2266		ps_thread_count = 128;
2267		break;
2268	}
2269
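	/* Each non-PS stage gets (max_threads - ps_thread_count) / 6,
	 * rounded down to a multiple of 8.  E.g. 248 threads with
	 * ps_thread_count = 128: (248 - 128) / 6 = 20 -> 16 threads each
	 * for VS/GS/ES/HS/LS. */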
2270	sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count);
2271	sq_thread_resource_mgmt |= NUM_VS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
2272	sq_thread_resource_mgmt |= NUM_GS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
2273	sq_thread_resource_mgmt |= NUM_ES_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
2274	sq_thread_resource_mgmt_2 = NUM_HS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
2275	sq_thread_resource_mgmt_2 |= NUM_LS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
2276
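	/* Stack entries are split evenly, max_stack_entries / 6 per
	 * stage; e.g. 512 / 6 = 85 entries each on the larger parts. */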
2277	sq_stack_resource_mgmt_1 = NUM_PS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
2278	sq_stack_resource_mgmt_1 |= NUM_VS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
2279	sq_stack_resource_mgmt_2 = NUM_GS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
2280	sq_stack_resource_mgmt_2 |= NUM_ES_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
2281	sq_stack_resource_mgmt_3 = NUM_HS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
2282	sq_stack_resource_mgmt_3 |= NUM_LS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
2283
2284	WREG32(SQ_CONFIG, sq_config);
2285	WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
2286	WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
2287	WREG32(SQ_GPR_RESOURCE_MGMT_3, sq_gpr_resource_mgmt_3);
2288	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
2289	WREG32(SQ_THREAD_RESOURCE_MGMT_2, sq_thread_resource_mgmt_2);
2290	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
2291	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
2292	WREG32(SQ_STACK_RESOURCE_MGMT_3, sq_stack_resource_mgmt_3);
2293	WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0);
2294	WREG32(SQ_LDS_RESOURCE_MGMT, sq_lds_resource_mgmt);
2295
2296	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
2297					  FORCE_EOV_MAX_REZ_CNT(255)));
2298
2299	switch (rdev->family) {
2300	case CHIP_CEDAR:
2301	case CHIP_PALM:
2302	case CHIP_SUMO:
2303	case CHIP_SUMO2:
2304	case CHIP_CAICOS:
2305		vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY);
2306		break;
2307	default:
2308		vgt_cache_invalidation = CACHE_INVALIDATION(VC_AND_TC);
2309		break;
2310	}
2311	vgt_cache_invalidation |= AUTO_INVLD_EN(ES_AND_GS_AUTO);
2312	WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation);
2313
2314	WREG32(VGT_GS_VERTEX_REUSE, 16);
2315	WREG32(PA_SU_LINE_STIPPLE_VALUE, 0);
2316	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
2317
2318	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, 14);
2319	WREG32(VGT_OUT_DEALLOC_CNTL, 16);
2320
2321	WREG32(CB_PERF_CTR0_SEL_0, 0);
2322	WREG32(CB_PERF_CTR0_SEL_1, 0);
2323	WREG32(CB_PERF_CTR1_SEL_0, 0);
2324	WREG32(CB_PERF_CTR1_SEL_1, 0);
2325	WREG32(CB_PERF_CTR2_SEL_0, 0);
2326	WREG32(CB_PERF_CTR2_SEL_1, 0);
2327	WREG32(CB_PERF_CTR3_SEL_0, 0);
2328	WREG32(CB_PERF_CTR3_SEL_1, 0);
2329
2330	/* clear render buffer base addresses */
2331	WREG32(CB_COLOR0_BASE, 0);
2332	WREG32(CB_COLOR1_BASE, 0);
2333	WREG32(CB_COLOR2_BASE, 0);
2334	WREG32(CB_COLOR3_BASE, 0);
2335	WREG32(CB_COLOR4_BASE, 0);
2336	WREG32(CB_COLOR5_BASE, 0);
2337	WREG32(CB_COLOR6_BASE, 0);
2338	WREG32(CB_COLOR7_BASE, 0);
2339	WREG32(CB_COLOR8_BASE, 0);
2340	WREG32(CB_COLOR9_BASE, 0);
2341	WREG32(CB_COLOR10_BASE, 0);
2342	WREG32(CB_COLOR11_BASE, 0);
2343
2344	/* set the shader const cache sizes to 0 */
2345	for (i = SQ_ALU_CONST_BUFFER_SIZE_PS_0; i < 0x28200; i += 4)
2346		WREG32(i, 0);
2347	for (i = SQ_ALU_CONST_BUFFER_SIZE_HS_0; i < 0x29000; i += 4)
2348		WREG32(i, 0);
2349
2350	tmp = RREG32(HDP_MISC_CNTL);
2351	tmp |= HDP_FLUSH_INVALIDATE_CACHE;
2352	WREG32(HDP_MISC_CNTL, tmp);
2353
2354	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
2355	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
2356
2357	WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
2358
2359	udelay(50);
2360
2361}
2362
2363int evergreen_mc_init(struct radeon_device *rdev)
2364{
2365	u32 tmp;
2366	int chansize, numchan;
2367
2368	/* Get VRAM information */
2369	rdev->mc.vram_is_ddr = true;
2370	if ((rdev->family == CHIP_PALM) ||
2371	    (rdev->family == CHIP_SUMO) ||
2372	    (rdev->family == CHIP_SUMO2))
2373		tmp = RREG32(FUS_MC_ARB_RAMCFG);
2374	else
2375		tmp = RREG32(MC_ARB_RAMCFG);
2376	if (tmp & CHANSIZE_OVERRIDE) {
2377		chansize = 16;
2378	} else if (tmp & CHANSIZE_MASK) {
2379		chansize = 64;
2380	} else {
2381		chansize = 32;
2382	}
2383	tmp = RREG32(MC_SHARED_CHMAP);
2384	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
2385	case 0:
2386	default:
2387		numchan = 1;
2388		break;
2389	case 1:
2390		numchan = 2;
2391		break;
2392	case 2:
2393		numchan = 4;
2394		break;
2395	case 3:
2396		numchan = 8;
2397		break;
2398	}
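	/* Effective bus width is channels * channel size, e.g. a
	 * hypothetical 4-channel, 32-bit-per-channel board reports a
	 * 128-bit bus. */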
2399	rdev->mc.vram_width = numchan * chansize;
2400	/* Could the aperture size report 0? */
2401	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
2402	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
2403	/* Setup GPU memory space */
2404	if ((rdev->family == CHIP_PALM) ||
2405	    (rdev->family == CHIP_SUMO) ||
2406	    (rdev->family == CHIP_SUMO2)) {
2407		/* size in bytes on fusion */
2408		rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
2409		rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
2410	} else {
2411		/* size in MB on evergreen/cayman/tn */
2412		rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
2413		rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
2414	}
2415	rdev->mc.visible_vram_size = rdev->mc.aper_size;
2416	r700_vram_gtt_location(rdev, &rdev->mc);
2417	radeon_update_bandwidth_info(rdev);
2418
2419	return 0;
2420}
2421
2422bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
2423{
2424	u32 srbm_status;
2425	u32 grbm_status;
2426	u32 grbm_status_se0, grbm_status_se1;
2427
2428	srbm_status = RREG32(SRBM_STATUS);
2429	grbm_status = RREG32(GRBM_STATUS);
2430	grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
2431	grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
2432	if (!(grbm_status & GUI_ACTIVE)) {
2433		radeon_ring_lockup_update(ring);
2434		return false;
2435	}
2436	/* force CP activity */
2437	radeon_ring_force_activity(rdev, ring);
2438	return radeon_ring_test_lockup(rdev, ring);
2439}
2440
2441static void evergreen_gpu_soft_reset_gfx(struct radeon_device *rdev)
2442{
2443	u32 grbm_reset = 0;
2444
2445	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
2446		return;
2447
2448	dev_info(rdev->dev, "  GRBM_STATUS               = 0x%08X\n",
2449		RREG32(GRBM_STATUS));
2450	dev_info(rdev->dev, "  GRBM_STATUS_SE0           = 0x%08X\n",
2451		RREG32(GRBM_STATUS_SE0));
2452	dev_info(rdev->dev, "  GRBM_STATUS_SE1           = 0x%08X\n",
2453		RREG32(GRBM_STATUS_SE1));
2454	dev_info(rdev->dev, "  SRBM_STATUS               = 0x%08X\n",
2455		RREG32(SRBM_STATUS));
2456	dev_info(rdev->dev, "  R_008674_CP_STALLED_STAT1 = 0x%08X\n",
2457		RREG32(CP_STALLED_STAT1));
2458	dev_info(rdev->dev, "  R_008678_CP_STALLED_STAT2 = 0x%08X\n",
2459		RREG32(CP_STALLED_STAT2));
2460	dev_info(rdev->dev, "  R_00867C_CP_BUSY_STAT     = 0x%08X\n",
2461		RREG32(CP_BUSY_STAT));
2462	dev_info(rdev->dev, "  R_008680_CP_STAT          = 0x%08X\n",
2463		RREG32(CP_STAT));
2464
2465	/* Disable CP parsing/prefetching */
2466	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
2467
2468	/* reset all the gfx blocks */
2469	grbm_reset = (SOFT_RESET_CP |
2470		      SOFT_RESET_CB |
2471		      SOFT_RESET_DB |
2472		      SOFT_RESET_PA |
2473		      SOFT_RESET_SC |
2474		      SOFT_RESET_SPI |
2475		      SOFT_RESET_SH |
2476		      SOFT_RESET_SX |
2477		      SOFT_RESET_TC |
2478		      SOFT_RESET_TA |
2479		      SOFT_RESET_VC |
2480		      SOFT_RESET_VGT);
2481
2482	dev_info(rdev->dev, "  GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
2483	WREG32(GRBM_SOFT_RESET, grbm_reset);
2484	(void)RREG32(GRBM_SOFT_RESET);
2485	udelay(50);
2486	WREG32(GRBM_SOFT_RESET, 0);
2487	(void)RREG32(GRBM_SOFT_RESET);
2488
2489	dev_info(rdev->dev, "  GRBM_STATUS               = 0x%08X\n",
2490		RREG32(GRBM_STATUS));
2491	dev_info(rdev->dev, "  GRBM_STATUS_SE0           = 0x%08X\n",
2492		RREG32(GRBM_STATUS_SE0));
2493	dev_info(rdev->dev, "  GRBM_STATUS_SE1           = 0x%08X\n",
2494		RREG32(GRBM_STATUS_SE1));
2495	dev_info(rdev->dev, "  SRBM_STATUS               = 0x%08X\n",
2496		RREG32(SRBM_STATUS));
2497	dev_info(rdev->dev, "  R_008674_CP_STALLED_STAT1 = 0x%08X\n",
2498		RREG32(CP_STALLED_STAT1));
2499	dev_info(rdev->dev, "  R_008678_CP_STALLED_STAT2 = 0x%08X\n",
2500		RREG32(CP_STALLED_STAT2));
2501	dev_info(rdev->dev, "  R_00867C_CP_BUSY_STAT     = 0x%08X\n",
2502		RREG32(CP_BUSY_STAT));
2503	dev_info(rdev->dev, "  R_008680_CP_STAT          = 0x%08X\n",
2504		RREG32(CP_STAT));
2505}
2506
2507static void evergreen_gpu_soft_reset_dma(struct radeon_device *rdev)
2508{
2509	u32 tmp;
2510
2511	if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
2512		return;
2513
2514	dev_info(rdev->dev, "  R_00D034_DMA_STATUS_REG   = 0x%08X\n",
2515		RREG32(DMA_STATUS_REG));
2516
2517	/* Disable DMA */
2518	tmp = RREG32(DMA_RB_CNTL);
2519	tmp &= ~DMA_RB_ENABLE;
2520	WREG32(DMA_RB_CNTL, tmp);
2521
2522	/* Reset dma */
2523	WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
2524	RREG32(SRBM_SOFT_RESET);
2525	udelay(50);
2526	WREG32(SRBM_SOFT_RESET, 0);
2527
2528	dev_info(rdev->dev, "  R_00D034_DMA_STATUS_REG   = 0x%08X\n",
2529		RREG32(DMA_STATUS_REG));
2530}
2531
2532static int evergreen_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
2533{
2534	struct evergreen_mc_save save;
2535
2536	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
2537		reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
2538
2539	if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
2540		reset_mask &= ~RADEON_RESET_DMA;
2541
2542	if (reset_mask == 0)
2543		return 0;
2544
2545	dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
2546
2547	evergreen_mc_stop(rdev, &save);
2548	if (evergreen_mc_wait_for_idle(rdev)) {
2549		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
2550	}
2551
2552	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
2553		evergreen_gpu_soft_reset_gfx(rdev);
2554
2555	if (reset_mask & RADEON_RESET_DMA)
2556		evergreen_gpu_soft_reset_dma(rdev);
2557
2558	/* Wait a little for things to settle down */
2559	udelay(50);
2560
2561	evergreen_mc_resume(rdev, &save);
2562	return 0;
2563}
2564
2565int evergreen_asic_reset(struct radeon_device *rdev)
2566{
2567	return evergreen_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
2568					       RADEON_RESET_COMPUTE |
2569					       RADEON_RESET_DMA));
2570}
2571
2572/* Interrupts */
2573
2574u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc)
2575{
2576	if (crtc >= rdev->num_crtc)
2577		return 0;
2578	else
2579		return RREG32(CRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
2580}
2581
2582void evergreen_disable_interrupt_state(struct radeon_device *rdev)
2583{
2584	u32 tmp;
2585
2586	if (rdev->family >= CHIP_CAYMAN) {
2587		cayman_cp_int_cntl_setup(rdev, 0,
2588					 CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
2589		cayman_cp_int_cntl_setup(rdev, 1, 0);
2590		cayman_cp_int_cntl_setup(rdev, 2, 0);
2591		tmp = RREG32(CAYMAN_DMA1_CNTL) & ~TRAP_ENABLE;
2592		WREG32(CAYMAN_DMA1_CNTL, tmp);
2593	} else
2594		WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
2595	tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
2596	WREG32(DMA_CNTL, tmp);
2597	WREG32(GRBM_INT_CNTL, 0);
2598	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
2599	WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
2600	if (rdev->num_crtc >= 4) {
2601		WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
2602		WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
2603	}
2604	if (rdev->num_crtc >= 6) {
2605		WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
2606		WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
2607	}
2608
2609	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
2610	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
2611	if (rdev->num_crtc >= 4) {
2612		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
2613		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
2614	}
2615	if (rdev->num_crtc >= 6) {
2616		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
2617		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
2618	}
2619
2620	/* only one DAC on DCE6 */
2621	if (!ASIC_IS_DCE6(rdev))
2622		WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
2623	WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
2624
2625	tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2626	WREG32(DC_HPD1_INT_CONTROL, tmp);
2627	tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2628	WREG32(DC_HPD2_INT_CONTROL, tmp);
2629	tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2630	WREG32(DC_HPD3_INT_CONTROL, tmp);
2631	tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2632	WREG32(DC_HPD4_INT_CONTROL, tmp);
2633	tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2634	WREG32(DC_HPD5_INT_CONTROL, tmp);
2635	tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2636	WREG32(DC_HPD6_INT_CONTROL, tmp);
2637
2638}
2639
2640int evergreen_irq_set(struct radeon_device *rdev)
2641{
2642	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
2643	u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
2644	u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
2645	u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
2646	u32 grbm_int_cntl = 0;
2647	u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
2648	u32 afmt1 = 0, afmt2 = 0, afmt3 = 0, afmt4 = 0, afmt5 = 0, afmt6 = 0;
2649	u32 dma_cntl, dma_cntl1 = 0;
2650
2651	if (!rdev->irq.installed) {
2652		dev_warn(rdev->dev, "Can't enable IRQ/MSI because no handler is installed\n");
2653		return -EINVAL;
2654	}
2655	/* don't enable anything if the ih is disabled */
2656	if (!rdev->ih.enabled) {
2657		r600_disable_interrupts(rdev);
2658		/* force the active interrupt state to all disabled */
2659		evergreen_disable_interrupt_state(rdev);
2660		return 0;
2661	}
2662
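	/* Snapshot each control register with its enable bits masked off,
	 * set the bits for every source requested below, and write the
	 * whole set back in one pass at the end of the function. */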
2663	hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
2664	hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
2665	hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
2666	hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
2667	hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
2668	hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
2669
2670	afmt1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
2671	afmt2 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
2672	afmt3 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
2673	afmt4 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
2674	afmt5 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
2675	afmt6 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
2676
2677	dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
2678
2679	if (rdev->family >= CHIP_CAYMAN) {
2680		/* enable CP interrupts on all rings */
2681		if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
2682			DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
2683			cp_int_cntl |= TIME_STAMP_INT_ENABLE;
2684		}
2685		if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) {
2686			DRM_DEBUG("evergreen_irq_set: sw int cp1\n");
2687			cp_int_cntl1 |= TIME_STAMP_INT_ENABLE;
2688		}
2689		if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) {
2690			DRM_DEBUG("evergreen_irq_set: sw int cp2\n");
2691			cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
2692		}
2693	} else {
2694		if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
2695			DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
2696			cp_int_cntl |= RB_INT_ENABLE;
2697			cp_int_cntl |= TIME_STAMP_INT_ENABLE;
2698		}
2699	}
2700
2701	if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
2702		DRM_DEBUG("r600_irq_set: sw int dma\n");
2703		dma_cntl |= TRAP_ENABLE;
2704	}
2705
2706	if (rdev->family >= CHIP_CAYMAN) {
2707		dma_cntl1 = RREG32(CAYMAN_DMA1_CNTL) & ~TRAP_ENABLE;
2708		if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) {
2709			DRM_DEBUG("r600_irq_set: sw int dma1\n");
2710			dma_cntl1 |= TRAP_ENABLE;
2711		}
2712	}
2713
2714	if (rdev->irq.crtc_vblank_int[0] ||
2715	    atomic_read(&rdev->irq.pflip[0])) {
2716		DRM_DEBUG("evergreen_irq_set: vblank 0\n");
2717		crtc1 |= VBLANK_INT_MASK;
2718	}
2719	if (rdev->irq.crtc_vblank_int[1] ||
2720	    atomic_read(&rdev->irq.pflip[1])) {
2721		DRM_DEBUG("evergreen_irq_set: vblank 1\n");
2722		crtc2 |= VBLANK_INT_MASK;
2723	}
2724	if (rdev->irq.crtc_vblank_int[2] ||
2725	    atomic_read(&rdev->irq.pflip[2])) {
2726		DRM_DEBUG("evergreen_irq_set: vblank 2\n");
2727		crtc3 |= VBLANK_INT_MASK;
2728	}
2729	if (rdev->irq.crtc_vblank_int[3] ||
2730	    atomic_read(&rdev->irq.pflip[3])) {
2731		DRM_DEBUG("evergreen_irq_set: vblank 3\n");
2732		crtc4 |= VBLANK_INT_MASK;
2733	}
2734	if (rdev->irq.crtc_vblank_int[4] ||
2735	    atomic_read(&rdev->irq.pflip[4])) {
2736		DRM_DEBUG("evergreen_irq_set: vblank 4\n");
2737		crtc5 |= VBLANK_INT_MASK;
2738	}
2739	if (rdev->irq.crtc_vblank_int[5] ||
2740	    atomic_read(&rdev->irq.pflip[5])) {
2741		DRM_DEBUG("evergreen_irq_set: vblank 5\n");
2742		crtc6 |= VBLANK_INT_MASK;
2743	}
2744	if (rdev->irq.hpd[0]) {
2745		DRM_DEBUG("evergreen_irq_set: hpd 1\n");
2746		hpd1 |= DC_HPDx_INT_EN;
2747	}
2748	if (rdev->irq.hpd[1]) {
2749		DRM_DEBUG("evergreen_irq_set: hpd 2\n");
2750		hpd2 |= DC_HPDx_INT_EN;
2751	}
2752	if (rdev->irq.hpd[2]) {
2753		DRM_DEBUG("evergreen_irq_set: hpd 3\n");
2754		hpd3 |= DC_HPDx_INT_EN;
2755	}
2756	if (rdev->irq.hpd[3]) {
2757		DRM_DEBUG("evergreen_irq_set: hpd 4\n");
2758		hpd4 |= DC_HPDx_INT_EN;
2759	}
2760	if (rdev->irq.hpd[4]) {
2761		DRM_DEBUG("evergreen_irq_set: hpd 5\n");
2762		hpd5 |= DC_HPDx_INT_EN;
2763	}
2764	if (rdev->irq.hpd[5]) {
2765		DRM_DEBUG("evergreen_irq_set: hpd 6\n");
2766		hpd6 |= DC_HPDx_INT_EN;
2767	}
2768	if (rdev->irq.afmt[0]) {
2769		DRM_DEBUG("evergreen_irq_set: hdmi 0\n");
2770		afmt1 |= AFMT_AZ_FORMAT_WTRIG_MASK;
2771	}
2772	if (rdev->irq.afmt[1]) {
2773		DRM_DEBUG("evergreen_irq_set: hdmi 1\n");
2774		afmt2 |= AFMT_AZ_FORMAT_WTRIG_MASK;
2775	}
2776	if (rdev->irq.afmt[2]) {
2777		DRM_DEBUG("evergreen_irq_set: hdmi 2\n");
2778		afmt3 |= AFMT_AZ_FORMAT_WTRIG_MASK;
2779	}
2780	if (rdev->irq.afmt[3]) {
2781		DRM_DEBUG("evergreen_irq_set: hdmi 3\n");
2782		afmt4 |= AFMT_AZ_FORMAT_WTRIG_MASK;
2783	}
2784	if (rdev->irq.afmt[4]) {
2785		DRM_DEBUG("evergreen_irq_set: hdmi 4\n");
2786		afmt5 |= AFMT_AZ_FORMAT_WTRIG_MASK;
2787	}
2788	if (rdev->irq.afmt[5]) {
2789		DRM_DEBUG("evergreen_irq_set: hdmi 5\n");
2790		afmt6 |= AFMT_AZ_FORMAT_WTRIG_MASK;
2791	}
2792
2793	if (rdev->family >= CHIP_CAYMAN) {
2794		cayman_cp_int_cntl_setup(rdev, 0, cp_int_cntl);
2795		cayman_cp_int_cntl_setup(rdev, 1, cp_int_cntl1);
2796		cayman_cp_int_cntl_setup(rdev, 2, cp_int_cntl2);
2797	} else
2798		WREG32(CP_INT_CNTL, cp_int_cntl);
2799
2800	WREG32(DMA_CNTL, dma_cntl);
2801
2802	if (rdev->family >= CHIP_CAYMAN)
2803		WREG32(CAYMAN_DMA1_CNTL, dma_cntl1);
2804
2805	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
2806
2807	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
2808	WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
2809	if (rdev->num_crtc >= 4) {
2810		WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
2811		WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
2812	}
2813	if (rdev->num_crtc >= 6) {
2814		WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
2815		WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
2816	}
2817
2818	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
2819	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
2820	if (rdev->num_crtc >= 4) {
2821		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
2822		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
2823	}
2824	if (rdev->num_crtc >= 6) {
2825		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
2826		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
2827	}
2828
2829	WREG32(DC_HPD1_INT_CONTROL, hpd1);
2830	WREG32(DC_HPD2_INT_CONTROL, hpd2);
2831	WREG32(DC_HPD3_INT_CONTROL, hpd3);
2832	WREG32(DC_HPD4_INT_CONTROL, hpd4);
2833	WREG32(DC_HPD5_INT_CONTROL, hpd5);
2834	WREG32(DC_HPD6_INT_CONTROL, hpd6);
2835
2836	WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, afmt1);
2837	WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, afmt2);
2838	WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, afmt3);
2839	WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, afmt4);
2840	WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, afmt5);
2841	WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, afmt6);
2842
2843	return 0;
2844}
2845
2846static void evergreen_irq_ack(struct radeon_device *rdev)
2847{
2848	u32 tmp;
2849
2850	rdev->irq.stat_regs.evergreen.disp_int = RREG32(DISP_INTERRUPT_STATUS);
2851	rdev->irq.stat_regs.evergreen.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
2852	rdev->irq.stat_regs.evergreen.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
2853	rdev->irq.stat_regs.evergreen.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
2854	rdev->irq.stat_regs.evergreen.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
2855	rdev->irq.stat_regs.evergreen.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
2856	rdev->irq.stat_regs.evergreen.d1grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
2857	rdev->irq.stat_regs.evergreen.d2grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
2858	if (rdev->num_crtc >= 4) {
2859		rdev->irq.stat_regs.evergreen.d3grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
2860		rdev->irq.stat_regs.evergreen.d4grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
2861	}
2862	if (rdev->num_crtc >= 6) {
2863		rdev->irq.stat_regs.evergreen.d5grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
2864		rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
2865	}
2866
2867	rdev->irq.stat_regs.evergreen.afmt_status1 = RREG32(AFMT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
2868	rdev->irq.stat_regs.evergreen.afmt_status2 = RREG32(AFMT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
2869	rdev->irq.stat_regs.evergreen.afmt_status3 = RREG32(AFMT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
2870	rdev->irq.stat_regs.evergreen.afmt_status4 = RREG32(AFMT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
2871	rdev->irq.stat_regs.evergreen.afmt_status5 = RREG32(AFMT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
2872	rdev->irq.stat_regs.evergreen.afmt_status6 = RREG32(AFMT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
2873
2874	if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
2875		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
2876	if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
2877		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
2878	if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT)
2879		WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
2880	if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT)
2881		WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
2882	if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT)
2883		WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
2884	if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT)
2885		WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
2886
2887	if (rdev->num_crtc >= 4) {
2888		if (rdev->irq.stat_regs.evergreen.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
2889			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
2890		if (rdev->irq.stat_regs.evergreen.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
2891			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
2892		if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
2893			WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
2894		if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
2895			WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
2896		if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
2897			WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
2898		if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
2899			WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
2900	}
2901
2902	if (rdev->num_crtc >= 6) {
2903		if (rdev->irq.stat_regs.evergreen.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
2904			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
2905		if (rdev->irq.stat_regs.evergreen.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
2906			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
2907		if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
2908			WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
2909		if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
2910			WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
2911		if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
2912			WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
2913		if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
2914			WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
2915	}
2916
2917	if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
2918		tmp = RREG32(DC_HPD1_INT_CONTROL);
2919		tmp |= DC_HPDx_INT_ACK;
2920		WREG32(DC_HPD1_INT_CONTROL, tmp);
2921	}
2922	if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
2923		tmp = RREG32(DC_HPD2_INT_CONTROL);
2924		tmp |= DC_HPDx_INT_ACK;
2925		WREG32(DC_HPD2_INT_CONTROL, tmp);
2926	}
2927	if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
2928		tmp = RREG32(DC_HPD3_INT_CONTROL);
2929		tmp |= DC_HPDx_INT_ACK;
2930		WREG32(DC_HPD3_INT_CONTROL, tmp);
2931	}
2932	if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
2933		tmp = RREG32(DC_HPD4_INT_CONTROL);
2934		tmp |= DC_HPDx_INT_ACK;
2935		WREG32(DC_HPD4_INT_CONTROL, tmp);
2936	}
2937	if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
2938		tmp = RREG32(DC_HPD5_INT_CONTROL);
2939		tmp |= DC_HPDx_INT_ACK;
2940		WREG32(DC_HPD5_INT_CONTROL, tmp);
2941	}
2942	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
2943		tmp = RREG32(DC_HPD6_INT_CONTROL);
2944		tmp |= DC_HPDx_INT_ACK;
2945		WREG32(DC_HPD6_INT_CONTROL, tmp);
2946	}
2947	if (rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG) {
2948		tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
2949		tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
2950		WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, tmp);
2951	}
2952	if (rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG) {
2953		tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
2954		tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
2955		WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, tmp);
2956	}
2957	if (rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG) {
2958		tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
2959		tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
2960		WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, tmp);
2961	}
2962	if (rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG) {
2963		tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
2964		tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
2965		WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, tmp);
2966	}
2967	if (rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG) {
2968		tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
2969		tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
2970		WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, tmp);
2971	}
2972	if (rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG) {
2973		tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
2974		tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
2975		WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, tmp);
2976	}
2977}
2978
2979static void evergreen_irq_disable(struct radeon_device *rdev)
2980{
2981	r600_disable_interrupts(rdev);
2982	/* Wait and acknowledge irq */
2983	mdelay(1);
2984	evergreen_irq_ack(rdev);
2985	evergreen_disable_interrupt_state(rdev);
2986}
2987
2988void evergreen_irq_suspend(struct radeon_device *rdev)
2989{
2990	evergreen_irq_disable(rdev);
2991	r600_rlc_stop(rdev);
2992}
2993
2994static u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
2995{
2996	u32 wptr, tmp;
2997
2998	if (rdev->wb.enabled)
2999		wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
3000	else
3001		wptr = RREG32(IH_RB_WPTR);
3002
3003	if (wptr & RB_OVERFLOW) {
3004		/* When a ring buffer overflow happens, start parsing
3005		 * interrupts from the last vector not yet overwritten
3006		 * (wptr + 16).  Hopefully this should allow us to catch up.
3007		 */
3008		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
3009			wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
3010		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
3011		tmp = RREG32(IH_RB_CNTL);
3012		tmp |= IH_WPTR_OVERFLOW_CLEAR;
3013		WREG32(IH_RB_CNTL, tmp);
3014	}
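	/* rptr/wptr are byte offsets into a power-of-two ring, so masking
	 * with ptr_mask implements the wrap-around (e.g. a hypothetical
	 * 64KB IH ring gives ptr_mask = 0xFFFF). */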
3015	return (wptr & rdev->ih.ptr_mask);
3016}
3017
3018irqreturn_t evergreen_irq_process(struct radeon_device *rdev)
3019{
3020	u32 wptr;
3021	u32 rptr;
3022	u32 src_id, src_data;
3023	u32 ring_index;
3024	bool queue_hotplug = false;
3025	bool queue_hdmi = false;
3026
3027	if (!rdev->ih.enabled || rdev->shutdown)
3028		return IRQ_NONE;
3029
3030	wptr = evergreen_get_ih_wptr(rdev);
3031
3032restart_ih:
3033	/* is somebody else already processing irqs? */
3034	if (atomic_xchg(&rdev->ih.lock, 1))
3035		return IRQ_NONE;
3036
3037	rptr = rdev->ih.rptr;
3038	DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
3039
3040	/* Order reading of wptr vs. reading of IH ring data */
3041	rmb();
3042
3043	/* display interrupts */
3044	evergreen_irq_ack(rdev);
3045
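	/* each IH ring vector is 16 bytes (4 dwords): dword 0 carries
	 * the source id, dword 1 the source data */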
3046	while (rptr != wptr) {
3047		/* wptr/rptr are in bytes! */
3048		ring_index = rptr / 4;
3049		src_id =  le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
3050		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
3051
3052		switch (src_id) {
3053		case 1: /* D1 vblank/vline */
3054			switch (src_data) {
3055			case 0: /* D1 vblank */
3056				if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
3057					if (rdev->irq.crtc_vblank_int[0]) {
3058						drm_handle_vblank(rdev->ddev, 0);
3059						rdev->pm.vblank_sync = true;
3060						DRM_WAKEUP(&rdev->irq.vblank_queue);
3061					}
3062					if (atomic_read(&rdev->irq.pflip[0]))
3063						radeon_crtc_handle_flip(rdev, 0);
3064					rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
3065					DRM_DEBUG("IH: D1 vblank\n");
3066				}
3067				break;
3068			case 1: /* D1 vline */
3069				if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
3070					rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
3071					DRM_DEBUG("IH: D1 vline\n");
3072				}
3073				break;
3074			default:
3075				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3076				break;
3077			}
3078			break;
3079		case 2: /* D2 vblank/vline */
3080			switch (src_data) {
3081			case 0: /* D2 vblank */
3082				if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
3083					if (rdev->irq.crtc_vblank_int[1]) {
3084						drm_handle_vblank(rdev->ddev, 1);
3085						rdev->pm.vblank_sync = true;
3086						DRM_WAKEUP(&rdev->irq.vblank_queue);
3087					}
3088					if (atomic_read(&rdev->irq.pflip[1]))
3089						radeon_crtc_handle_flip(rdev, 1);
3090					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
3091					DRM_DEBUG("IH: D2 vblank\n");
3092				}
3093				break;
3094			case 1: /* D2 vline */
3095				if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
3096					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
3097					DRM_DEBUG("IH: D2 vline\n");
3098				}
3099				break;
3100			default:
3101				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3102				break;
3103			}
3104			break;
3105		case 3: /* D3 vblank/vline */
3106			switch (src_data) {
3107			case 0: /* D3 vblank */
3108				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
3109					if (rdev->irq.crtc_vblank_int[2]) {
3110						drm_handle_vblank(rdev->ddev, 2);
3111						rdev->pm.vblank_sync = true;
3112						DRM_WAKEUP(&rdev->irq.vblank_queue);
3113					}
3114					if (atomic_read(&rdev->irq.pflip[2]))
3115						radeon_crtc_handle_flip(rdev, 2);
3116					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
3117					DRM_DEBUG("IH: D3 vblank\n");
3118				}
3119				break;
3120			case 1: /* D3 vline */
3121				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
3122					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
3123					DRM_DEBUG("IH: D3 vline\n");
3124				}
3125				break;
3126			default:
3127				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3128				break;
3129			}
3130			break;
3131		case 4: /* D4 vblank/vline */
3132			switch (src_data) {
3133			case 0: /* D4 vblank */
3134				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
3135					if (rdev->irq.crtc_vblank_int[3]) {
3136						drm_handle_vblank(rdev->ddev, 3);
3137						rdev->pm.vblank_sync = true;
3138						DRM_WAKEUP(&rdev->irq.vblank_queue);
3139					}
3140					if (atomic_read(&rdev->irq.pflip[3]))
3141						radeon_crtc_handle_flip(rdev, 3);
3142					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
3143					DRM_DEBUG("IH: D4 vblank\n");
3144				}
3145				break;
3146			case 1: /* D4 vline */
3147				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
3148					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
3149					DRM_DEBUG("IH: D4 vline\n");
3150				}
3151				break;
3152			default:
3153				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3154				break;
3155			}
3156			break;
3157		case 5: /* D5 vblank/vline */
3158			switch (src_data) {
3159			case 0: /* D5 vblank */
3160				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
3161					if (rdev->irq.crtc_vblank_int[4]) {
3162						drm_handle_vblank(rdev->ddev, 4);
3163						rdev->pm.vblank_sync = true;
3164						DRM_WAKEUP(&rdev->irq.vblank_queue);
3165					}
3166					if (atomic_read(&rdev->irq.pflip[4]))
3167						radeon_crtc_handle_flip(rdev, 4);
3168					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
3169					DRM_DEBUG("IH: D5 vblank\n");
3170				}
3171				break;
3172			case 1: /* D5 vline */
3173				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
3174					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
3175					DRM_DEBUG("IH: D5 vline\n");
3176				}
3177				break;
3178			default:
3179				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3180				break;
3181			}
3182			break;
3183		case 6: /* D6 vblank/vline */
3184			switch (src_data) {
3185			case 0: /* D6 vblank */
3186				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
3187					if (rdev->irq.crtc_vblank_int[5]) {
3188						drm_handle_vblank(rdev->ddev, 5);
3189						rdev->pm.vblank_sync = true;
3190						DRM_WAKEUP(&rdev->irq.vblank_queue);
3191					}
3192					if (atomic_read(&rdev->irq.pflip[5]))
3193						radeon_crtc_handle_flip(rdev, 5);
3194					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
3195					DRM_DEBUG("IH: D6 vblank\n");
3196				}
3197				break;
3198			case 1: /* D6 vline */
3199				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
3200					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
3201					DRM_DEBUG("IH: D6 vline\n");
3202				}
3203				break;
3204			default:
3205				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3206				break;
3207			}
3208			break;
3209		case 42: /* HPD hotplug */
3210			switch (src_data) {
3211			case 0:
3212				if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
3213					rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
3214					queue_hotplug = true;
3215					DRM_DEBUG("IH: HPD1\n");
3216				}
3217				break;
3218			case 1:
3219				if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
3220					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
3221					queue_hotplug = true;
3222					DRM_DEBUG("IH: HPD2\n");
3223				}
3224				break;
3225			case 2:
3226				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
3227					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
3228					queue_hotplug = true;
3229					DRM_DEBUG("IH: HPD3\n");
3230				}
3231				break;
3232			case 3:
3233				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
3234					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
3235					queue_hotplug = true;
3236					DRM_DEBUG("IH: HPD4\n");
3237				}
3238				break;
3239			case 4:
3240				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
3241					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
3242					queue_hotplug = true;
3243					DRM_DEBUG("IH: HPD5\n");
3244				}
3245				break;
3246			case 5:
3247				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
3248					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
3249					queue_hotplug = true;
3250					DRM_DEBUG("IH: HPD6\n");
3251				}
3252				break;
3253			default:
3254				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3255				break;
3256			}
3257			break;
3258		case 44: /* hdmi */
3259			switch (src_data) {
3260			case 0:
3261				if (rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG) {
3262					rdev->irq.stat_regs.evergreen.afmt_status1 &= ~AFMT_AZ_FORMAT_WTRIG;
3263					queue_hdmi = true;
3264					DRM_DEBUG("IH: HDMI0\n");
3265				}
3266				break;
3267			case 1:
3268				if (rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG) {
3269					rdev->irq.stat_regs.evergreen.afmt_status2 &= ~AFMT_AZ_FORMAT_WTRIG;
3270					queue_hdmi = true;
3271					DRM_DEBUG("IH: HDMI1\n");
3272				}
3273				break;
3274			case 2:
3275				if (rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG) {
3276					rdev->irq.stat_regs.evergreen.afmt_status3 &= ~AFMT_AZ_FORMAT_WTRIG;
3277					queue_hdmi = true;
3278					DRM_DEBUG("IH: HDMI2\n");
3279				}
3280				break;
3281			case 3:
3282				if (rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG) {
3283					rdev->irq.stat_regs.evergreen.afmt_status4 &= ~AFMT_AZ_FORMAT_WTRIG;
3284					queue_hdmi = true;
3285					DRM_DEBUG("IH: HDMI3\n");
3286				}
3287				break;
3288			case 4:
3289				if (rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG) {
3290					rdev->irq.stat_regs.evergreen.afmt_status5 &= ~AFMT_AZ_FORMAT_WTRIG;
3291					queue_hdmi = true;
3292					DRM_DEBUG("IH: HDMI4\n");
3293				}
3294				break;
3295			case 5:
3296				if (rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG) {
3297					rdev->irq.stat_regs.evergreen.afmt_status6 &= ~AFMT_AZ_FORMAT_WTRIG;
3298					queue_hdmi = true;
3299					DRM_DEBUG("IH: HDMI5\n");
3300				}
3301				break;
3302			default:
3303				DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
3304				break;
3305			}
3306			break;
3307		case 146:
3308		case 147:
3309			dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
3310			dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
3311				RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
3312			dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
3313				RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
3314			/* reset addr and status */
3315			WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
3316			break;
3317		case 176: /* CP_INT in ring buffer */
3318		case 177: /* CP_INT in IB1 */
3319		case 178: /* CP_INT in IB2 */
3320			DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
3321			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
3322			break;
3323		case 181: /* CP EOP event */
3324			DRM_DEBUG("IH: CP EOP\n");
3325			if (rdev->family >= CHIP_CAYMAN) {
3326				switch (src_data) {
3327				case 0:
3328					radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
3329					break;
3330				case 1:
3331					radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
3332					break;
3333				case 2:
3334					radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
3335					break;
3336				}
3337			} else
3338				radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
3339			break;
3340		case 224: /* DMA trap event */
3341			DRM_DEBUG("IH: DMA trap\n");
3342			radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
3343			break;
3344		case 233: /* GUI IDLE */
3345			DRM_DEBUG("IH: GUI idle\n");
3346			break;
3347		case 244: /* DMA trap event */
3348			if (rdev->family >= CHIP_CAYMAN) {
3349				DRM_DEBUG("IH: DMA1 trap\n");
3350				radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
3351			}
3352			break;
3353		default:
3354			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3355			break;
3356		}
3357
3358		/* wptr/rptr are in bytes! */
3359		rptr += 16;
3360		rptr &= rdev->ih.ptr_mask;
3361	}
3362	if (queue_hotplug)
3363		taskqueue_enqueue(rdev->tq, &rdev->hotplug_work);
3364	if (queue_hdmi)
3365		taskqueue_enqueue(rdev->tq, &rdev->audio_work);
3366	rdev->ih.rptr = rptr;
3367	WREG32(IH_RB_RPTR, rdev->ih.rptr);
3368	atomic_set(&rdev->ih.lock, 0);
3369
3370	/* make sure wptr hasn't changed while processing */
3371	wptr = evergreen_get_ih_wptr(rdev);
3372	if (wptr != rptr)
3373		goto restart_ih;
3374
3375	return IRQ_HANDLED;
3376}
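
/*
 * Editorial sketch (not part of the driver): each IH vector is four
 * little-endian dwords (16 bytes), of which only the first two are
 * decoded above.  A D2 vblank interrupt, for example, arrives as:
 *
 *	ring_index = rptr / 4;					// dword index
 *	src_id   = le32_to_cpu(ring[ring_index]) & 0xff;	// == 2 (D2)
 *	src_data = le32_to_cpu(ring[ring_index + 1]) & 0xfffffff; // == 0 (vblank)
 *
 * src_id selects the block (1-6 display, 42 HPD, 44 HDMI, 176-181 CP,
 * 224/244 DMA, ...) and src_data the sub-source within that block.
 */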
3377
3378/**
3379 * evergreen_dma_fence_ring_emit - emit a fence on the DMA ring
3380 *
3381 * @rdev: radeon_device pointer
3382 * @fence: radeon fence object
3383 *
3384 * Add a DMA fence packet to the ring to write
3385 * the fence seq number and DMA trap packet to generate
3386 * an interrupt if needed (evergreen-SI).
3387 */
3388void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
3389				   struct radeon_fence *fence)
3390{
3391	struct radeon_ring *ring = &rdev->ring[fence->ring];
3392	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
3393	/* write the fence */
3394	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
3395	radeon_ring_write(ring, addr & 0xfffffffc);
3396	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
3397	radeon_ring_write(ring, fence->seq);
3398	/* generate an interrupt */
3399	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
3400	/* flush HDP */
3401	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
3402	radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
3403	radeon_ring_write(ring, 1);
3404}
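
/*
 * Editorial sketch (not part of the driver): DMA packets carry 40-bit
 * GPU addresses split across two dwords.  For a hypothetical fence
 * address addr == 0x123456780ULL, the two address writes above emit:
 *
 *	addr & 0xfffffffc		// 0x23456780, dword-aligned low 32 bits
 *	upper_32_bits(addr) & 0xff	// 0x01, address bits 39:32
 */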
3405
3406/**
3407 * evergreen_dma_ring_ib_execute - schedule an IB on the DMA engine
3408 *
3409 * @rdev: radeon_device pointer
3410 * @ib: IB object to schedule
3411 *
3412 * Schedule an IB in the DMA ring (evergreen).
3413 */
3414void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
3415				   struct radeon_ib *ib)
3416{
3417	struct radeon_ring *ring = &rdev->ring[ib->ring];
3418
3419	if (rdev->wb.enabled) {
3420		u32 next_rptr = ring->wptr + 4;
3421		while ((next_rptr & 7) != 5)
3422			next_rptr++;
3423		next_rptr += 3;
3424		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
3425		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
3426		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
3427		radeon_ring_write(ring, next_rptr);
3428	}
3429
3430	/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
3431	 * Pad as necessary with NOPs.
3432	 */
3433	while ((ring->wptr & 7) != 5)
3434		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
3435	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
3436	radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
3437	radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
3438
3439}
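
/*
 * Editorial sketch (not part of the driver): the INDIRECT_BUFFER packet
 * is 3 dwords, so ending it on an 8-dword boundary means starting it at
 * an offset of 5 mod 8; the NOP loop above pads its way there.  E.g.:
 *
 *	// wptr == 2: three NOPs are written (at 2, 3 and 4), then
 *	// wptr == 5: the 3-dword IB packet lands on dwords 5..7,
 *	// leaving wptr == 8, i.e. 8-dword aligned.
 */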
3440
3441/**
3442 * evergreen_copy_dma - copy pages using the DMA engine
3443 *
3444 * @rdev: radeon_device pointer
3445 * @src_offset: src GPU address
3446 * @dst_offset: dst GPU address
3447 * @num_gpu_pages: number of GPU pages to xfer
3448 * @fence: radeon fence object
3449 *
3450 * Copy GPU paging using the DMA engine (evergreen-cayman).
3451 * Used by the radeon ttm implementation to move pages if
3452 * registered as the asic copy callback.
3453 */
3454int evergreen_copy_dma(struct radeon_device *rdev,
3455		       uint64_t src_offset, uint64_t dst_offset,
3456		       unsigned num_gpu_pages,
3457		       struct radeon_fence **fence)
3458{
3459	struct radeon_semaphore *sem = NULL;
3460	int ring_index = rdev->asic->copy.dma_ring_index;
3461	struct radeon_ring *ring = &rdev->ring[ring_index];
3462	u32 size_in_dw, cur_size_in_dw;
3463	int i, num_loops;
3464	int r = 0;
3465
3466	r = radeon_semaphore_create(rdev, &sem);
3467	if (r) {
3468		DRM_ERROR("radeon: moving bo (%d).\n", r);
3469		return r;
3470	}
3471
3472	size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
3473	num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff);
3474	r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
3475	if (r) {
3476		DRM_ERROR("radeon: moving bo (%d).\n", r);
3477		radeon_semaphore_free(rdev, &sem, NULL);
3478		return r;
3479	}
3480
3481	if (radeon_fence_need_sync(*fence, ring->idx)) {
3482		radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
3483					    ring->idx);
3484		radeon_fence_note_sync(*fence, ring->idx);
3485	} else {
3486		radeon_semaphore_free(rdev, &sem, NULL);
3487	}
3488
3489	for (i = 0; i < num_loops; i++) {
3490		cur_size_in_dw = size_in_dw;
3491		if (cur_size_in_dw > 0xFFFFF)
3492			cur_size_in_dw = 0xFFFFF;
3493		size_in_dw -= cur_size_in_dw;
3494		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
3495		radeon_ring_write(ring, dst_offset & 0xfffffffc);
3496		radeon_ring_write(ring, src_offset & 0xfffffffc);
3497		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
3498		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
3499		src_offset += cur_size_in_dw * 4;
3500		dst_offset += cur_size_in_dw * 4;
3501	}
3502
3503	r = radeon_fence_emit(rdev, fence, ring->idx);
3504	if (r) {
3505		radeon_ring_unlock_undo(rdev, ring);
3506		return r;
3507	}
3508
3509	radeon_ring_unlock_commit(rdev, ring);
3510	radeon_semaphore_free(rdev, &sem, *fence);
3511
3512	return r;
3513}
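
/*
 * Editorial sketch (not part of the driver): each DMA_PACKET_COPY moves
 * at most 0xFFFFF (1048575) dwords, so larger transfers are chunked.
 * Assuming 4 KB GPU pages (RADEON_GPU_PAGE_SHIFT == 12), a 16 MB copy
 * (4096 pages) works out to:
 *
 *	size_in_dw = (4096 << 12) / 4;			// 0x400000 dwords
 *	num_loops  = DIV_ROUND_UP(0x400000, 0xfffff);	// 5 packets
 *
 * i.e. four full 0xFFFFF-dword packets plus one 4-dword remainder, with
 * the ring locked for num_loops * 5 + 11 == 36 dwords up front.
 */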
3514
3515static int evergreen_startup(struct radeon_device *rdev)
3516{
3517	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
3518	int r;
3519
3520	/* enable pcie gen2 link */
3521	evergreen_pcie_gen2_enable(rdev);
3522
3523	if (ASIC_IS_DCE5(rdev)) {
3524		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
3525			r = ni_init_microcode(rdev);
3526			if (r) {
3527				DRM_ERROR("Failed to load firmware!\n");
3528				return r;
3529			}
3530		}
3531		r = ni_mc_load_microcode(rdev);
3532		if (r) {
3533			DRM_ERROR("Failed to load MC firmware!\n");
3534			return r;
3535		}
3536	} else {
3537		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
3538			r = r600_init_microcode(rdev);
3539			if (r) {
3540				DRM_ERROR("Failed to load firmware!\n");
3541				return r;
3542			}
3543		}
3544	}
3545
3546	r = r600_vram_scratch_init(rdev);
3547	if (r)
3548		return r;
3549
3550	evergreen_mc_program(rdev);
3551	if (rdev->flags & RADEON_IS_AGP) {
3552		evergreen_agp_enable(rdev);
3553	} else {
3554		r = evergreen_pcie_gart_enable(rdev);
3555		if (r)
3556			return r;
3557	}
3558	evergreen_gpu_init(rdev);
3559
3560	r = evergreen_blit_init(rdev);
3561	if (r) {
3562		r600_blit_fini(rdev);
3563		rdev->asic->copy.copy = NULL;
3564		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
3565	}
3566
3567	/* allocate wb buffer */
3568	r = radeon_wb_init(rdev);
3569	if (r)
3570		return r;
3571
3572	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
3573	if (r) {
3574		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
3575		return r;
3576	}
3577
3578	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
3579	if (r) {
3580		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
3581		return r;
3582	}
3583
3584	/* Enable IRQ */
3585	r = r600_irq_init(rdev);
3586	if (r) {
3587		DRM_ERROR("radeon: IH init failed (%d).\n", r);
3588		radeon_irq_kms_fini(rdev);
3589		return r;
3590	}
3591	evergreen_irq_set(rdev);
3592
3593	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
3594			     R600_CP_RB_RPTR, R600_CP_RB_WPTR,
3595			     0, 0xfffff, RADEON_CP_PACKET2);
3596	if (r)
3597		return r;
3598
3599	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
3600	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
3601			     DMA_RB_RPTR, DMA_RB_WPTR,
3602			     2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
3603	if (r)
3604		return r;
3605
3606	r = evergreen_cp_load_microcode(rdev);
3607	if (r)
3608		return r;
3609	r = evergreen_cp_resume(rdev);
3610	if (r)
3611		return r;
3612	r = r600_dma_resume(rdev);
3613	if (r)
3614		return r;
3615
3616	r = radeon_ib_pool_init(rdev);
3617	if (r) {
3618		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
3619		return r;
3620	}
3621
3622	r = r600_audio_init(rdev);
3623	if (r) {
3624		DRM_ERROR("radeon: audio init failed\n");
3625		return r;
3626	}
3627
3628	return 0;
3629}
3630
3631int evergreen_resume(struct radeon_device *rdev)
3632{
3633	int r;
3634
3635	/* Reset the ASIC; the GFX blocks are often in a bad state
3636	 * after the driver is unloaded or after a resume.
3637	 */
3638	if (radeon_asic_reset(rdev))
3639		dev_warn(rdev->dev, "GPU reset failed!\n");
3640	/* Do not reset the GPU before posting; on rv770 hardware, unlike
3641	 * r500, posting performs the tasks necessary to bring the GPU back
3642	 * into good shape.
3643	 */
3644	/* post card */
3645	atom_asic_init(rdev->mode_info.atom_context);
3646
3647	rdev->accel_working = true;
3648	r = evergreen_startup(rdev);
3649	if (r) {
3650		DRM_ERROR("evergreen startup failed on resume\n");
3651		rdev->accel_working = false;
3652		return r;
3653	}
3654
3655	return r;
3656
3657}
3658
3659int evergreen_suspend(struct radeon_device *rdev)
3660{
3661	r600_audio_fini(rdev);
3662	r700_cp_stop(rdev);
3663	r600_dma_stop(rdev);
3664	evergreen_irq_suspend(rdev);
3665	radeon_wb_disable(rdev);
3666	evergreen_pcie_gart_disable(rdev);
3667
3668	return 0;
3669}
3670
3671/* The plan is to move initialization into this function and use
3672 * helper functions so that radeon_device_init pretty much does
3673 * nothing more than call ASIC-specific functions.  This should
3674 * also allow us to remove a bunch of callback functions, like
3675 * vram_info.
3676 */
3677int evergreen_init(struct radeon_device *rdev)
3678{
3679	int r;
3680
3681	/* Read BIOS */
3682	if (!radeon_get_bios(rdev)) {
3683		if (ASIC_IS_AVIVO(rdev))
3684			return -EINVAL;
3685	}
3686	/* Must be an ATOMBIOS */
3687	if (!rdev->is_atom_bios) {
3688		dev_err(rdev->dev, "Expecting atombios for evergreen GPU\n");
3689		return -EINVAL;
3690	}
3691	r = radeon_atombios_init(rdev);
3692	if (r)
3693		return r;
3694	/* Reset the ASIC; the GFX blocks are often in a bad state
3695	 * after the driver is unloaded or after a resume.
3696	 */
3697	if (radeon_asic_reset(rdev))
3698		dev_warn(rdev->dev, "GPU reset failed!\n");
3699	/* Post card if necessary */
3700	if (!radeon_card_posted(rdev)) {
3701		if (!rdev->bios) {
3702			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
3703			return -EINVAL;
3704		}
3705		DRM_INFO("GPU not posted. posting now...\n");
3706		atom_asic_init(rdev->mode_info.atom_context);
3707	}
3708	/* Initialize scratch registers */
3709	r600_scratch_init(rdev);
3710	/* Initialize surface registers */
3711	radeon_surface_init(rdev);
3712	/* Initialize clocks */
3713	radeon_get_clock_info(rdev->ddev);
3714	/* Fence driver */
3715	r = radeon_fence_driver_init(rdev);
3716	if (r)
3717		return r;
3718	/* initialize AGP */
3719	if (rdev->flags & RADEON_IS_AGP) {
3720		r = radeon_agp_init(rdev);
3721		if (r)
3722			radeon_agp_disable(rdev);
3723	}
3724	/* initialize memory controller */
3725	r = evergreen_mc_init(rdev);
3726	if (r)
3727		return r;
3728	/* Memory manager */
3729	r = radeon_bo_init(rdev);
3730	if (r)
3731		return r;
3732
3733	r = radeon_irq_kms_init(rdev);
3734	if (r)
3735		return r;
3736
3737	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
3738	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
3739
3740	rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
3741	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
3742
3743	rdev->ih.ring_obj = NULL;
3744	r600_ih_ring_init(rdev, 64 * 1024);
3745
3746	r = r600_pcie_gart_init(rdev);
3747	if (r)
3748		return r;
3749
3750	rdev->accel_working = true;
3751	r = evergreen_startup(rdev);
3752	if (r) {
3753		dev_err(rdev->dev, "disabling GPU acceleration\n");
3754		r700_cp_fini(rdev);
3755		r600_dma_fini(rdev);
3756		r600_irq_fini(rdev);
3757		radeon_wb_fini(rdev);
3758		radeon_ib_pool_fini(rdev);
3759		radeon_irq_kms_fini(rdev);
3760		evergreen_pcie_gart_fini(rdev);
3761		rdev->accel_working = false;
3762	}
3763
3764	/* Don't start up if the MC ucode is missing on BTC parts.
3765	 * The default clocks and voltages before the MC ucode
3766	 * is loaded are not sufficient for advanced operations.
3767	 */
3768	if (ASIC_IS_DCE5(rdev)) {
3769		if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
3770			DRM_ERROR("radeon: MC ucode required for NI+.\n");
3771			return -EINVAL;
3772		}
3773	}
3774
3775	return 0;
3776}
3777
3778void evergreen_fini(struct radeon_device *rdev)
3779{
3780	r600_audio_fini(rdev);
3781	r600_blit_fini(rdev);
3782	r700_cp_fini(rdev);
3783	r600_dma_fini(rdev);
3784	r600_irq_fini(rdev);
3785	radeon_wb_fini(rdev);
3786	radeon_ib_pool_fini(rdev);
3787	radeon_irq_kms_fini(rdev);
3788	evergreen_pcie_gart_fini(rdev);
3789	r600_vram_scratch_fini(rdev);
3790	radeon_gem_fini(rdev);
3791	radeon_fence_driver_fini(rdev);
3792	radeon_agp_fini(rdev);
3793	radeon_bo_fini(rdev);
3794	radeon_atombios_fini(rdev);
3795	if (ASIC_IS_DCE5(rdev))
3796		ni_fini_microcode(rdev);
3797	else
3798		r600_fini_microcode(rdev);
3799	free(rdev->bios, DRM_MEM_DRIVER);
3800	rdev->bios = NULL;
3801}
3802
3803void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
3804{
3805	u32 link_width_cntl, speed_cntl, mask;
3806	int ret;
3807
3808	if (radeon_pcie_gen2 == 0)
3809		return;
3810
3811	if (rdev->flags & RADEON_IS_IGP)
3812		return;
3813
3814	if (!(rdev->flags & RADEON_IS_PCIE))
3815		return;
3816
3817	/* x2 cards have a special sequence */
3818	if (ASIC_IS_X2(rdev))
3819		return;
3820
3821	ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
3822	if (ret != 0)
3823		return;
3824
3825	if (!(mask & DRM_PCIE_SPEED_50))
3826		return;
3827
3828	speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
3829	if (speed_cntl & LC_CURRENT_DATA_RATE) {
3830		DRM_INFO("PCIE gen 2 link speeds already enabled\n");
3831		return;
3832	}
3833
3834	DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
3835
3836	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
3837	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
3838
3839		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
3840		link_width_cntl &= ~LC_UPCONFIGURE_DIS;
3841		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
3842
3843		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
3844		speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
3845		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
3846
3847		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
3848		speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
3849		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
3850
3851		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
3852		speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
3853		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
3854
3855		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
3856		speed_cntl |= LC_GEN2_EN_STRAP;
3857		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
3858
3859	} else {
3860		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
3861		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
3862		if (1)
3863			link_width_cntl |= LC_UPCONFIGURE_DIS;
3864		else
3865			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
3866		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
3867	}
3868}
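
/*
 * Editorial sketch (not part of the driver): the gen2 enable sequence
 * above is a series of read-modify-write cycles on PCIE_LC_SPEED_CNTL;
 * the register is re-read before every step so that status bits the
 * hardware may have changed in the meantime are not clobbered:
 *
 *	speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);	// fresh copy
 *	speed_cntl |= LC_GEN2_EN_STRAP;			// touch one bit
 *	WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);	// write back
 */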
3869