radeon_cp.c revision 182080
1/* radeon_cp.c -- CP support for Radeon -*- linux-c -*- */
2/*-
3 * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
4 * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
5 * Copyright 2007 Advanced Micro Devices, Inc.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
22 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
26 *
27 * Authors:
28 *    Kevin E. Martin <martin@valinux.com>
29 *    Gareth Hughes <gareth@valinux.com>
30 */
31
32#include <sys/cdefs.h>
33__FBSDID("$FreeBSD: head/sys/dev/drm/radeon_cp.c 182080 2008-08-23 20:59:12Z rnoland $");
34
35#include "dev/drm/drmP.h"
36#include "dev/drm/drm.h"
37#include "dev/drm/radeon_drm.h"
38#include "dev/drm/radeon_drv.h"
39#include "dev/drm/r300_reg.h"
40
41#include "dev/drm/radeon_microcode.h"
42#define RADEON_FIFO_DEBUG	0
43
44static int radeon_do_cleanup_cp(struct drm_device * dev);
45static void radeon_do_cp_start(drm_radeon_private_t * dev_priv);
46
47static u32 R500_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
48{
49	u32 ret;
50	RADEON_WRITE(R520_MC_IND_INDEX, 0x7f0000 | (addr & 0xff));
51	ret = RADEON_READ(R520_MC_IND_DATA);
52	RADEON_WRITE(R520_MC_IND_INDEX, 0);
53	return ret;
54}
55
56static u32 RS480_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
57{
58	u32 ret;
59	RADEON_WRITE(RS480_NB_MC_INDEX, addr & 0xff);
60	ret = RADEON_READ(RS480_NB_MC_DATA);
61	RADEON_WRITE(RS480_NB_MC_INDEX, 0xff);
62	return ret;
63}
64
65static u32 RS690_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
66{
67	u32 ret;
68	RADEON_WRITE(RS690_MC_INDEX, (addr & RS690_MC_INDEX_MASK));
69	ret = RADEON_READ(RS690_MC_DATA);
70	RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_MASK);
71	return ret;
72}
73
74static u32 IGP_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
75{
76        if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)
77	    return RS690_READ_MCIND(dev_priv, addr);
78	else
79	    return RS480_READ_MCIND(dev_priv, addr);
80}
81
82u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv)
83{
84
85	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
86		return R500_READ_MCIND(dev_priv, RV515_MC_FB_LOCATION);
87	else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)
88		return RS690_READ_MCIND(dev_priv, RS690_MC_FB_LOCATION);
89	else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
90		return R500_READ_MCIND(dev_priv, R520_MC_FB_LOCATION);
91	else
92		return RADEON_READ(RADEON_MC_FB_LOCATION);
93}
94
95static void radeon_write_fb_location(drm_radeon_private_t *dev_priv, u32 fb_loc)
96{
97	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
98		R500_WRITE_MCIND(RV515_MC_FB_LOCATION, fb_loc);
99	else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)
100		RS690_WRITE_MCIND(RS690_MC_FB_LOCATION, fb_loc);
101	else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
102		R500_WRITE_MCIND(R520_MC_FB_LOCATION, fb_loc);
103	else
104		RADEON_WRITE(RADEON_MC_FB_LOCATION, fb_loc);
105}
106
107static void radeon_write_agp_location(drm_radeon_private_t *dev_priv, u32 agp_loc)
108{
109	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
110		R500_WRITE_MCIND(RV515_MC_AGP_LOCATION, agp_loc);
111	else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)
112		RS690_WRITE_MCIND(RS690_MC_AGP_LOCATION, agp_loc);
113	else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
114		R500_WRITE_MCIND(R520_MC_AGP_LOCATION, agp_loc);
115	else
116		RADEON_WRITE(RADEON_MC_AGP_LOCATION, agp_loc);
117}
118
119static void radeon_write_agp_base(drm_radeon_private_t *dev_priv, u64 agp_base)
120{
121	u32 agp_base_hi = upper_32_bits(agp_base);
122	u32 agp_base_lo = agp_base & 0xffffffff;
123
124	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) {
125		R500_WRITE_MCIND(RV515_MC_AGP_BASE, agp_base_lo);
126		R500_WRITE_MCIND(RV515_MC_AGP_BASE_2, agp_base_hi);
127	} else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) {
128		RS690_WRITE_MCIND(RS690_MC_AGP_BASE, agp_base_lo);
129		RS690_WRITE_MCIND(RS690_MC_AGP_BASE_2, agp_base_hi);
130	} else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) {
131		R500_WRITE_MCIND(R520_MC_AGP_BASE, agp_base_lo);
132		R500_WRITE_MCIND(R520_MC_AGP_BASE_2, agp_base_hi);
133	} else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) ||
134		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) {
135		RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo);
136		RADEON_WRITE(RS480_AGP_BASE_2, agp_base_hi);
137	} else {
138		RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo);
139		if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R200)
140			RADEON_WRITE(RADEON_AGP_BASE_2, agp_base_hi);
141	}
142}
143
/* Read a PLL register through the CLOCK_CNTL index/data pair.
 * Only the low 5 bits of addr select the PLL register.
 * NOTE(review): no locking is done around the index/data sequence here;
 * presumably callers hold whatever lock protects register access.
 */
static int RADEON_READ_PLL(struct drm_device * dev, int addr)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;

	RADEON_WRITE8(RADEON_CLOCK_CNTL_INDEX, addr & 0x1f);
	return RADEON_READ(RADEON_CLOCK_CNTL_DATA);
}
151
/* Read a PCIE register through the PCIE index/data pair.
 * Only the low 8 bits of addr select the register.
 */
static u32 RADEON_READ_PCIE(drm_radeon_private_t *dev_priv, int addr)
{
	RADEON_WRITE8(RADEON_PCIE_INDEX, addr & 0xff);
	return RADEON_READ(RADEON_PCIE_DATA);
}
157
#if RADEON_FIFO_DEBUG
/* Dump the engine/ring/PCI-GART status registers to the log.  Debug-only:
 * compiled in only when RADEON_FIFO_DEBUG is set, and called from the
 * timeout paths of the wait helpers below.
 * (The "RTPR"/"WTPR" labels are long-standing typos for RPTR/WPTR and are
 * kept as-is so log output stays stable.)
 */
static void radeon_status(drm_radeon_private_t * dev_priv)
{
	printk("%s:\n", __FUNCTION__);
	printk("RBBM_STATUS = 0x%08x\n",
	       (unsigned int)RADEON_READ(RADEON_RBBM_STATUS));
	printk("CP_RB_RTPR = 0x%08x\n",
	       (unsigned int)RADEON_READ(RADEON_CP_RB_RPTR));
	printk("CP_RB_WTPR = 0x%08x\n",
	       (unsigned int)RADEON_READ(RADEON_CP_RB_WPTR));
	printk("AIC_CNTL = 0x%08x\n",
	       (unsigned int)RADEON_READ(RADEON_AIC_CNTL));
	printk("AIC_STAT = 0x%08x\n",
	       (unsigned int)RADEON_READ(RADEON_AIC_STAT));
	printk("AIC_PT_BASE = 0x%08x\n",
	       (unsigned int)RADEON_READ(RADEON_AIC_PT_BASE));
	printk("TLB_ADDR = 0x%08x\n",
	       (unsigned int)RADEON_READ(RADEON_AIC_TLB_ADDR));
	printk("TLB_DATA = 0x%08x\n",
	       (unsigned int)RADEON_READ(RADEON_AIC_TLB_DATA));
}
#endif
180
181/* ================================================================
182 * Engine, FIFO control
183 */
184
185static int radeon_do_pixcache_flush(drm_radeon_private_t * dev_priv)
186{
187	u32 tmp;
188	int i;
189
190	dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
191
192	if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) {
193		tmp = RADEON_READ(RADEON_RB3D_DSTCACHE_CTLSTAT);
194		tmp |= RADEON_RB3D_DC_FLUSH_ALL;
195		RADEON_WRITE(RADEON_RB3D_DSTCACHE_CTLSTAT, tmp);
196
197		for (i = 0; i < dev_priv->usec_timeout; i++) {
198			if (!(RADEON_READ(RADEON_RB3D_DSTCACHE_CTLSTAT)
199			      & RADEON_RB3D_DC_BUSY)) {
200				return 0;
201			}
202			DRM_UDELAY(1);
203		}
204	} else {
205		/* don't flush or purge cache here or lockup */
206		return 0;
207	}
208
209#if RADEON_FIFO_DEBUG
210	DRM_ERROR("failed!\n");
211	radeon_status(dev_priv);
212#endif
213	return -EBUSY;
214}
215
216static int radeon_do_wait_for_fifo(drm_radeon_private_t * dev_priv, int entries)
217{
218	int i;
219
220	dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
221
222	for (i = 0; i < dev_priv->usec_timeout; i++) {
223		int slots = (RADEON_READ(RADEON_RBBM_STATUS)
224			     & RADEON_RBBM_FIFOCNT_MASK);
225		if (slots >= entries)
226			return 0;
227		DRM_UDELAY(1);
228	}
229	DRM_INFO("wait for fifo failed status : 0x%08X 0x%08X\n",
230		 RADEON_READ(RADEON_RBBM_STATUS),
231		 RADEON_READ(R300_VAP_CNTL_STATUS));
232
233#if RADEON_FIFO_DEBUG
234	DRM_ERROR("failed!\n");
235	radeon_status(dev_priv);
236#endif
237	return -EBUSY;
238}
239
240static int radeon_do_wait_for_idle(drm_radeon_private_t * dev_priv)
241{
242	int i, ret;
243
244	dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
245
246	ret = radeon_do_wait_for_fifo(dev_priv, 64);
247	if (ret)
248		return ret;
249
250	for (i = 0; i < dev_priv->usec_timeout; i++) {
251		if (!(RADEON_READ(RADEON_RBBM_STATUS)
252		      & RADEON_RBBM_ACTIVE)) {
253			radeon_do_pixcache_flush(dev_priv);
254			return 0;
255		}
256		DRM_UDELAY(1);
257	}
258	DRM_INFO("wait idle failed status : 0x%08X 0x%08X\n",
259		 RADEON_READ(RADEON_RBBM_STATUS),
260		 RADEON_READ(R300_VAP_CNTL_STATUS));
261
262#if RADEON_FIFO_DEBUG
263	DRM_ERROR("failed!\n");
264	radeon_status(dev_priv);
265#endif
266	return -EBUSY;
267}
268
/* Determine the number of GB pipes for this chip and program the tiling
 * and pipe-related registers accordingly.  Called from the engine-reset
 * path for R300-class and newer parts.
 */
static void radeon_init_pipes(drm_radeon_private_t * dev_priv)
{
	uint32_t gb_tile_config, gb_pipe_sel = 0;

	/* RS4xx/RS6xx/R4xx/R5xx */
	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R420) {
		/* R420 and newer report their pipe count in GB_PIPE_SELECT
		 * bits 12-13 (value is count - 1). */
		gb_pipe_sel = RADEON_READ(R400_GB_PIPE_SELECT);
		dev_priv->num_gb_pipes = ((gb_pipe_sel >> 12) & 0x3) + 1;
	} else {
		/* R3xx */
		if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300) ||
		    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350)) {
			dev_priv->num_gb_pipes = 2;
		} else {
			/* R3Vxx */
			dev_priv->num_gb_pipes = 1;
		}
	}
	DRM_INFO("Num pipes: %d\n", dev_priv->num_gb_pipes);

	gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16 /*| R300_SUBPIXEL_1_16*/);

	/* Encode the pipe count into the tile config word. */
	switch(dev_priv->num_gb_pipes) {
	case 2: gb_tile_config |= R300_PIPE_COUNT_R300; break;
	case 3: gb_tile_config |= R300_PIPE_COUNT_R420_3P; break;
	case 4: gb_tile_config |= R300_PIPE_COUNT_R420; break;
	default:
	case 1: gb_tile_config |= R300_PIPE_COUNT_RV350; break;
	}

	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
		/* R5xx-only setup: clock gating per pipe and one destination
		 * bit per enabled pipe in SU_REG_DEST. */
		RADEON_WRITE_PLL(R500_DYN_SCLK_PWMEM_PIPE, (1 | ((gb_pipe_sel >> 8) & 0xf) << 4));
		RADEON_WRITE(R500_SU_REG_DEST, ((1 << dev_priv->num_gb_pipes) - 1));
	}
	RADEON_WRITE(R300_GB_TILE_CONFIG, gb_tile_config);
	radeon_do_wait_for_idle(dev_priv);
	RADEON_WRITE(R300_DST_PIPE_CONFIG, RADEON_READ(R300_DST_PIPE_CONFIG) | R300_PIPE_AUTO_CONFIG);
	RADEON_WRITE(R300_RB2D_DSTCACHE_MODE, (RADEON_READ(R300_RB2D_DSTCACHE_MODE) |
					       R300_DC_AUTOFLUSH_ENABLE |
					       R300_DC_DC_DISABLE_IGNORE_PE));


}
312
313/* ================================================================
314 * CP control, initialization
315 */
316
317/* Load the microcode for the CP */
318static void radeon_cp_load_microcode(drm_radeon_private_t * dev_priv)
319{
320	int i;
321	DRM_DEBUG("\n");
322
323	radeon_do_wait_for_idle(dev_priv);
324
325	RADEON_WRITE(RADEON_CP_ME_RAM_ADDR, 0);
326
327	if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R100) ||
328	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV100) ||
329	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV200) ||
330	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS100) ||
331	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS200)) {
332		DRM_INFO("Loading R100 Microcode\n");
333		for (i = 0; i < 256; i++) {
334			RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
335				     R100_cp_microcode[i][1]);
336			RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
337				     R100_cp_microcode[i][0]);
338		}
339	} else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R200) ||
340		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV250) ||
341		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV280) ||
342		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS300)) {
343		DRM_INFO("Loading R200 Microcode\n");
344		for (i = 0; i < 256; i++) {
345			RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
346				     R200_cp_microcode[i][1]);
347			RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
348				     R200_cp_microcode[i][0]);
349		}
350	} else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300) ||
351		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350) ||
352		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV350) ||
353		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV380) ||
354		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) ||
355		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) {
356		DRM_INFO("Loading R300 Microcode\n");
357		for (i = 0; i < 256; i++) {
358			RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
359				     R300_cp_microcode[i][1]);
360			RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
361				     R300_cp_microcode[i][0]);
362		}
363	} else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R420) ||
364		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV410)) {
365		DRM_INFO("Loading R400 Microcode\n");
366		for (i = 0; i < 256; i++) {
367			RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
368				     R420_cp_microcode[i][1]);
369			RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
370				     R420_cp_microcode[i][0]);
371		}
372	} else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) {
373		DRM_INFO("Loading RS690 Microcode\n");
374		for (i = 0; i < 256; i++) {
375			RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
376				     RS690_cp_microcode[i][1]);
377			RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
378				     RS690_cp_microcode[i][0]);
379		}
380	} else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) ||
381		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R520) ||
382		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530) ||
383		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R580) ||
384		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV560) ||
385		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV570)) {
386		DRM_INFO("Loading R500 Microcode\n");
387		for (i = 0; i < 256; i++) {
388			RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
389				     R520_cp_microcode[i][1]);
390			RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
391				     R520_cp_microcode[i][0]);
392		}
393	}
394}
395
396/* Flush any pending commands to the CP.  This should only be used just
397 * prior to a wait for idle, as it informs the engine that the command
398 * stream is ending.
399 */
static void radeon_do_cp_flush(drm_radeon_private_t * dev_priv)
{
	DRM_DEBUG("\n");
	/* The actual flush (setting bit 31 of CP_RB_WPTR) is compiled out;
	 * this function is currently a no-op apart from the debug trace. */
#if 0
	u32 tmp;

	tmp = RADEON_READ(RADEON_CP_RB_WPTR) | (1 << 31);
	RADEON_WRITE(RADEON_CP_RB_WPTR, tmp);
#endif
}
410
411/* Wait for the CP to go idle.
412 */
int radeon_do_cp_idle(drm_radeon_private_t * dev_priv)
{
	RING_LOCALS;
	DRM_DEBUG("\n");

	/* Six dwords of ring space for the three macros below.
	 * NOTE(review): the count must match what the RADEON_PURGE_* and
	 * RADEON_WAIT_UNTIL_IDLE macros emit (defined in radeon_drv.h) --
	 * keep BEGIN_RING in sync if those macros change. */
	BEGIN_RING(6);

	RADEON_PURGE_CACHE();
	RADEON_PURGE_ZCACHE();
	RADEON_WAIT_UNTIL_IDLE();

	ADVANCE_RING();
	COMMIT_RING();

	/* Commands are queued; now poll until the engine itself is idle. */
	return radeon_do_wait_for_idle(dev_priv);
}
429
430/* Start the Command Processor.
431 */
static void radeon_do_cp_start(drm_radeon_private_t * dev_priv)
{
	RING_LOCALS;
	DRM_DEBUG("\n");

	radeon_do_wait_for_idle(dev_priv);

	/* Enable the CP in the mode chosen at init (bus-master ring). */
	RADEON_WRITE(RADEON_CP_CSQ_CNTL, dev_priv->cp_mode);

	dev_priv->cp_running = 1;

	/* Eight dwords: 2 for the ISYNC packet plus what the purge/wait
	 * macros emit.  NOTE(review): keep in sync with the macro
	 * definitions in radeon_drv.h. */
	BEGIN_RING(8);
	/* isync can only be written through cp on r5xx write it here */
	OUT_RING(CP_PACKET0(RADEON_ISYNC_CNTL, 0));
	OUT_RING(RADEON_ISYNC_ANY2D_IDLE3D |
		 RADEON_ISYNC_ANY3D_IDLE2D |
		 RADEON_ISYNC_WAIT_IDLEGUI |
		 RADEON_ISYNC_CPSCRATCH_IDLEGUI);
	RADEON_PURGE_CACHE();
	RADEON_PURGE_ZCACHE();
	RADEON_WAIT_UNTIL_IDLE();
	ADVANCE_RING();
	COMMIT_RING();

	/* Record that flush/purge commands are already in flight. */
	dev_priv->track_flush |= RADEON_FLUSH_EMITED | RADEON_PURGE_EMITED;
}
458
459/* Reset the Command Processor.  This will not flush any pending
460 * commands, so you must wait for the CP command stream to complete
461 * before calling this routine.
462 */
463static void radeon_do_cp_reset(drm_radeon_private_t * dev_priv)
464{
465	u32 cur_read_ptr;
466	DRM_DEBUG("\n");
467
468	cur_read_ptr = RADEON_READ(RADEON_CP_RB_RPTR);
469	RADEON_WRITE(RADEON_CP_RB_WPTR, cur_read_ptr);
470	SET_RING_HEAD(dev_priv, cur_read_ptr);
471	dev_priv->ring.tail = cur_read_ptr;
472}
473
474/* Stop the Command Processor.  This will not flush any pending
475 * commands, so you must flush the command stream and wait for the CP
476 * to go idle before calling this routine.
477 */
static void radeon_do_cp_stop(drm_radeon_private_t * dev_priv)
{
	DRM_DEBUG("\n");

	/* Disable both the primary and indirect command queues. */
	RADEON_WRITE(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIDIS_INDDIS);

	dev_priv->cp_running = 0;
}
486
487/* Reset the engine.  This will stop the CP if it is running.
488 */
static int radeon_do_engine_reset(struct drm_device * dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	u32 clock_cntl_index = 0, mclk_cntl = 0, rbbm_soft_reset;
	DRM_DEBUG("\n");

	radeon_do_pixcache_flush(dev_priv);

	if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV410) {
	        /* may need something similar for newer chips */
		clock_cntl_index = RADEON_READ(RADEON_CLOCK_CNTL_INDEX);
		mclk_cntl = RADEON_READ_PLL(dev, RADEON_MCLK_CNTL);

		/* Force all memory/engine clocks on so the soft reset below
		 * is not affected by clock gating; restored further down. */
		RADEON_WRITE_PLL(RADEON_MCLK_CNTL, (mclk_cntl |
						    RADEON_FORCEON_MCLKA |
						    RADEON_FORCEON_MCLKB |
						    RADEON_FORCEON_YCLKA |
						    RADEON_FORCEON_YCLKB |
						    RADEON_FORCEON_MC |
						    RADEON_FORCEON_AIC));
	}

	rbbm_soft_reset = RADEON_READ(RADEON_RBBM_SOFT_RESET);

	/* Pulse the soft-reset bits for all engine blocks: assert, read
	 * back to flush the write, deassert, read back again. */
	RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset |
					      RADEON_SOFT_RESET_CP |
					      RADEON_SOFT_RESET_HI |
					      RADEON_SOFT_RESET_SE |
					      RADEON_SOFT_RESET_RE |
					      RADEON_SOFT_RESET_PP |
					      RADEON_SOFT_RESET_E2 |
					      RADEON_SOFT_RESET_RB));
	RADEON_READ(RADEON_RBBM_SOFT_RESET);
	RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset &
					      ~(RADEON_SOFT_RESET_CP |
						RADEON_SOFT_RESET_HI |
						RADEON_SOFT_RESET_SE |
						RADEON_SOFT_RESET_RE |
						RADEON_SOFT_RESET_PP |
						RADEON_SOFT_RESET_E2 |
						RADEON_SOFT_RESET_RB)));
	RADEON_READ(RADEON_RBBM_SOFT_RESET);

	if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV410) {
		/* Restore the clock state saved before the reset. */
		RADEON_WRITE_PLL(RADEON_MCLK_CNTL, mclk_cntl);
		RADEON_WRITE(RADEON_CLOCK_CNTL_INDEX, clock_cntl_index);
		RADEON_WRITE(RADEON_RBBM_SOFT_RESET, rbbm_soft_reset);
	}

	/* setup the raster pipes */
	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R300)
	    radeon_init_pipes(dev_priv);

	/* Reset the CP ring */
	radeon_do_cp_reset(dev_priv);

	/* The CP is no longer running after an engine reset */
	dev_priv->cp_running = 0;

	/* Reset any pending vertex, indirect buffers */
	radeon_freelist_reset(dev);

	return 0;
}
553
/* Program the memory controller, CP ring buffer registers, scratch
 * register writeback area, and bus mastering.  Called during CP init
 * and resume.  Handles both the AGP and PCI/SG cases.
 */
static void radeon_cp_init_ring_buffer(struct drm_device * dev,
				       drm_radeon_private_t * dev_priv)
{
	u32 ring_start, cur_read_ptr;
	u32 tmp;

	/* Initialize the memory controller. With new memory map, the fb location
	 * is not changed, it should have been properly initialized already. Part
	 * of the problem is that the code below is bogus, assuming the GART is
	 * always appended to the fb which is not necessarily the case
	 */
	if (!dev_priv->new_memmap)
		radeon_write_fb_location(dev_priv,
			     ((dev_priv->gart_vm_start - 1) & 0xffff0000)
			     | (dev_priv->fb_location >> 16));

#if __OS_HAS_AGP
	if (dev_priv->flags & RADEON_IS_AGP) {
		radeon_write_agp_base(dev_priv, dev->agp->base);

		/* AGP aperture: [gart_vm_start, gart_vm_start + gart_size),
		 * encoded as (end_hi16 | start_hi16). */
		radeon_write_agp_location(dev_priv,
			     (((dev_priv->gart_vm_start - 1 +
				dev_priv->gart_size) & 0xffff0000) |
			      (dev_priv->gart_vm_start >> 16)));

		/* Translate the ring's map offset into a GART address. */
		ring_start = (dev_priv->cp_ring->offset
			      - dev->agp->base
			      + dev_priv->gart_vm_start);
	} else
#endif
		ring_start = (dev_priv->cp_ring->offset
			      - (unsigned long)dev->sg->virtual
			      + dev_priv->gart_vm_start);

	RADEON_WRITE(RADEON_CP_RB_BASE, ring_start);

	/* Set the write pointer delay */
	RADEON_WRITE(RADEON_CP_RB_WPTR_DELAY, 0);

	/* Initialize the ring buffer's read and write pointers */
	cur_read_ptr = RADEON_READ(RADEON_CP_RB_RPTR);
	RADEON_WRITE(RADEON_CP_RB_WPTR, cur_read_ptr);
	SET_RING_HEAD(dev_priv, cur_read_ptr);
	dev_priv->ring.tail = cur_read_ptr;

	/* Tell the chip where to write the ring read pointer back to
	 * memory: an AGP address, or the bus address of the SG page that
	 * holds the rptr map in the PCI case. */
#if __OS_HAS_AGP
	if (dev_priv->flags & RADEON_IS_AGP) {
		RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR,
			     dev_priv->ring_rptr->offset
			     - dev->agp->base + dev_priv->gart_vm_start);
	} else
#endif
	{
		struct drm_sg_mem *entry = dev->sg;
		unsigned long tmp_ofs, page_ofs;

		tmp_ofs = dev_priv->ring_rptr->offset -
				(unsigned long)dev->sg->virtual;
		page_ofs = tmp_ofs >> PAGE_SHIFT;

		RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR, entry->busaddr[page_ofs]);
		DRM_DEBUG("ring rptr: offset=0x%08lx handle=0x%08lx\n",
			  (unsigned long)entry->busaddr[page_ofs],
			  entry->handle + tmp_ofs);
	}

	/* Set ring buffer size */
#ifdef __BIG_ENDIAN
	RADEON_WRITE(RADEON_CP_RB_CNTL,
		     RADEON_BUF_SWAP_32BIT |
		     (dev_priv->ring.fetch_size_l2ow << 18) |
		     (dev_priv->ring.rptr_update_l2qw << 8) |
		     dev_priv->ring.size_l2qw);
#else
	RADEON_WRITE(RADEON_CP_RB_CNTL,
		     (dev_priv->ring.fetch_size_l2ow << 18) |
		     (dev_priv->ring.rptr_update_l2qw << 8) |
		     dev_priv->ring.size_l2qw);
#endif

	/* Start with assuming that writeback doesn't work */
	dev_priv->writeback_works = 0;

	/* Initialize the scratch register pointer.  This will cause
	 * the scratch register values to be written out to memory
	 * whenever they are updated.
	 *
	 * We simply put this behind the ring read pointer, this works
	 * with PCI GART as well as (whatever kind of) AGP GART
	 */
	RADEON_WRITE(RADEON_SCRATCH_ADDR, RADEON_READ(RADEON_CP_RB_RPTR_ADDR)
		     + RADEON_SCRATCH_REG_OFFSET);

	dev_priv->scratch = ((__volatile__ u32 *)
			     dev_priv->ring_rptr->handle +
			     (RADEON_SCRATCH_REG_OFFSET / sizeof(u32)));

	/* Enable writeback for scratch registers 0-2. */
	RADEON_WRITE(RADEON_SCRATCH_UMSK, 0x7);

	/* Turn on bus mastering */
	tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
	RADEON_WRITE(RADEON_BUS_CNTL, tmp);

	/* Zero the frame/dispatch/clear counters in both the sarea and
	 * the scratch writeback area, and in the chip's own registers. */
	dev_priv->sarea_priv->last_frame = dev_priv->scratch[0] = 0;
	RADEON_WRITE(RADEON_LAST_FRAME_REG, dev_priv->sarea_priv->last_frame);

	dev_priv->sarea_priv->last_dispatch = dev_priv->scratch[1] = 0;
	RADEON_WRITE(RADEON_LAST_DISPATCH_REG,
		     dev_priv->sarea_priv->last_dispatch);

	dev_priv->sarea_priv->last_clear = dev_priv->scratch[2] = 0;
	RADEON_WRITE(RADEON_LAST_CLEAR_REG, dev_priv->sarea_priv->last_clear);

	radeon_do_wait_for_idle(dev_priv);

	/* Sync everything up */
	RADEON_WRITE(RADEON_ISYNC_CNTL,
		     (RADEON_ISYNC_ANY2D_IDLE3D |
		      RADEON_ISYNC_ANY3D_IDLE2D |
		      RADEON_ISYNC_WAIT_IDLEGUI |
		      RADEON_ISYNC_CPSCRATCH_IDLEGUI));

}
677
678static void radeon_test_writeback(drm_radeon_private_t * dev_priv)
679{
680	u32 tmp;
681
682	/* Writeback doesn't seem to work everywhere, test it here and possibly
683	 * enable it if it appears to work
684	 */
685	DRM_WRITE32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(1), 0);
686	RADEON_WRITE(RADEON_SCRATCH_REG1, 0xdeadbeef);
687
688	for (tmp = 0; tmp < dev_priv->usec_timeout; tmp++) {
689		if (DRM_READ32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(1)) ==
690		    0xdeadbeef)
691			break;
692		DRM_UDELAY(1);
693	}
694
695	if (tmp < dev_priv->usec_timeout) {
696		dev_priv->writeback_works = 1;
697		DRM_INFO("writeback test succeeded in %d usecs\n", tmp);
698	} else {
699		dev_priv->writeback_works = 0;
700		DRM_INFO("writeback test failed\n");
701	}
702	if (radeon_no_wb == 1) {
703		dev_priv->writeback_works = 0;
704		DRM_INFO("writeback forced off\n");
705	}
706
707	if (!dev_priv->writeback_works) {
708		/* Disable writeback to avoid unnecessary bus master transfers */
709		RADEON_WRITE(RADEON_CP_RB_CNTL, RADEON_READ(RADEON_CP_RB_CNTL) | RADEON_RB_NO_UPDATE);
710		RADEON_WRITE(RADEON_SCRATCH_UMSK, 0);
711	}
712}
713
714/* Enable or disable IGP GART on the chip */
static void radeon_set_igpgart(drm_radeon_private_t * dev_priv, int on)
{
	u32 temp;

	if (on) {
		DRM_DEBUG("programming igp gart %08X %08lX %08X\n",
			 dev_priv->gart_vm_start,
			 (long)dev_priv->gart_info.bus_addr,
			 dev_priv->gart_size);

		temp = IGP_READ_MCIND(dev_priv, RS480_MC_MISC_CNTL);

		/* Enable GART index-register access (RS690 additionally
		 * needs the GFX D3 block enabled). */
		if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)
			IGP_WRITE_MCIND(RS480_MC_MISC_CNTL, (RS480_GART_INDEX_REG_EN |
							     RS690_BLOCK_GFX_D3_EN));
		else
			IGP_WRITE_MCIND(RS480_MC_MISC_CNTL, RS480_GART_INDEX_REG_EN);

		IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN |
							       RS480_VA_SIZE_32MB));

		temp = IGP_READ_MCIND(dev_priv, RS480_GART_FEATURE_ID);
		IGP_WRITE_MCIND(RS480_GART_FEATURE_ID, (RS480_HANG_EN |
							RS480_TLB_ENABLE |
							RS480_GTW_LAC_EN |
							RS480_1LEVEL_GART));

		/* GART table base: low bits page-aligned, bits 32-39 of the
		 * bus address packed into bits 4-11. */
		temp = dev_priv->gart_info.bus_addr & 0xfffff000;
		temp |= (upper_32_bits(dev_priv->gart_info.bus_addr) & 0xff) << 4;
		IGP_WRITE_MCIND(RS480_GART_BASE, temp);

		temp = IGP_READ_MCIND(dev_priv, RS480_AGP_MODE_CNTL);
		IGP_WRITE_MCIND(RS480_AGP_MODE_CNTL, ((1 << RS480_REQ_TYPE_SNOOP_SHIFT) |
						      RS480_REQ_TYPE_SNOOP_DIS));

		radeon_write_agp_base(dev_priv, dev_priv->gart_vm_start);

		/* The IGP GART aperture is fixed at 32MB (matches the
		 * RS480_VA_SIZE_32MB programmed above). */
		dev_priv->gart_size = 32*1024*1024;
		temp = (((dev_priv->gart_vm_start - 1 + dev_priv->gart_size) &
			0xffff0000) | (dev_priv->gart_vm_start >> 16));

		radeon_write_agp_location(dev_priv, temp);

		temp = IGP_READ_MCIND(dev_priv, RS480_AGP_ADDRESS_SPACE_SIZE);
		IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN |
							       RS480_VA_SIZE_32MB));

		/* Wait for any in-flight cache invalidate to settle, trigger
		 * a fresh invalidate, wait for it to complete, then clear the
		 * control register.
		 * NOTE(review): these polls have no timeout -- a wedged MC
		 * would spin here forever. */
		do {
			temp = IGP_READ_MCIND(dev_priv, RS480_GART_CACHE_CNTRL);
			if ((temp & RS480_GART_CACHE_INVALIDATE) == 0)
				break;
			DRM_UDELAY(1);
		} while(1);

		IGP_WRITE_MCIND(RS480_GART_CACHE_CNTRL,
				RS480_GART_CACHE_INVALIDATE);

		do {
			temp = IGP_READ_MCIND(dev_priv, RS480_GART_CACHE_CNTRL);
			if ((temp & RS480_GART_CACHE_INVALIDATE) == 0)
				break;
			DRM_UDELAY(1);
		} while(1);

		IGP_WRITE_MCIND(RS480_GART_CACHE_CNTRL, 0);
	} else {
		/* Disable: shrink the GART address space to nothing. */
		IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, 0);
	}
}
784
/* Enable or disable the PCIE GART: program the table base and the
 * aperture start/end, then flip the enable bit in TX_GART_CNTL.
 */
static void radeon_set_pciegart(drm_radeon_private_t * dev_priv, int on)
{
	u32 tmp = RADEON_READ_PCIE(dev_priv, RADEON_PCIE_TX_GART_CNTL);
	if (on) {

		DRM_DEBUG("programming pcie %08X %08lX %08X\n",
			  dev_priv->gart_vm_start,
			  (long)dev_priv->gart_info.bus_addr,
			  dev_priv->gart_size);
		RADEON_WRITE_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO,
				  dev_priv->gart_vm_start);
		RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_BASE,
				  dev_priv->gart_info.bus_addr);
		RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_START_LO,
				  dev_priv->gart_vm_start);
		RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_END_LO,
				  dev_priv->gart_vm_start +
				  dev_priv->gart_size - 1);

		/* Park the AGP aperture out of the way. */
		radeon_write_agp_location(dev_priv, 0xffffffc0); /* ?? */

		RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL,
				  RADEON_PCIE_TX_GART_EN);
	} else {
		/* Clear only the enable bit, preserving the rest of CNTL. */
		RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL,
				  tmp & ~RADEON_PCIE_TX_GART_EN);
	}
}
813
814/* Enable or disable PCI GART on the chip */
static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on)
{
	u32 tmp;

	/* Dispatch to the IGP or PCIE variant when the chip uses one of
	 * those GART mechanisms; the code below handles classic PCI GART
	 * via the AIC registers. */
	if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
	    (dev_priv->flags & RADEON_IS_IGPGART)) {
		radeon_set_igpgart(dev_priv, on);
		return;
	}

	if (dev_priv->flags & RADEON_IS_PCIE) {
		radeon_set_pciegart(dev_priv, on);
		return;
	}

	tmp = RADEON_READ(RADEON_AIC_CNTL);

	if (on) {
		RADEON_WRITE(RADEON_AIC_CNTL,
			     tmp | RADEON_PCIGART_TRANSLATE_EN);

		/* set PCI GART page-table base address
		 */
		RADEON_WRITE(RADEON_AIC_PT_BASE, dev_priv->gart_info.bus_addr);

		/* set address range for PCI address translate
		 */
		RADEON_WRITE(RADEON_AIC_LO_ADDR, dev_priv->gart_vm_start);
		RADEON_WRITE(RADEON_AIC_HI_ADDR, dev_priv->gart_vm_start
			     + dev_priv->gart_size - 1);

		/* Turn off AGP aperture -- is this required for PCI GART?
		 */
		radeon_write_agp_location(dev_priv, 0xffffffc0);
		RADEON_WRITE(RADEON_AGP_COMMAND, 0);	/* clear AGP_COMMAND */
	} else {
		RADEON_WRITE(RADEON_AIC_CNTL,
			     tmp & ~RADEON_PCIGART_TRANSLATE_EN);
	}
}
855
856static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
857{
858	drm_radeon_private_t *dev_priv = dev->dev_private;
859
860	DRM_DEBUG("\n");
861
862	/* if we require new memory map but we don't have it fail */
863	if ((dev_priv->flags & RADEON_NEW_MEMMAP) && !dev_priv->new_memmap) {
864		DRM_ERROR("Cannot initialise DRM on this card\nThis card requires a new X.org DDX for 3D\n");
865		radeon_do_cleanup_cp(dev);
866		return -EINVAL;
867	}
868
869	if (init->is_pci && (dev_priv->flags & RADEON_IS_AGP))
870	{
871		DRM_DEBUG("Forcing AGP card to PCI mode\n");
872		dev_priv->flags &= ~RADEON_IS_AGP;
873	}
874	else if (!(dev_priv->flags & (RADEON_IS_AGP | RADEON_IS_PCI | RADEON_IS_PCIE))
875		 && !init->is_pci)
876	{
877		DRM_DEBUG("Restoring AGP flag\n");
878		dev_priv->flags |= RADEON_IS_AGP;
879	}
880
881	if ((!(dev_priv->flags & RADEON_IS_AGP)) && !dev->sg) {
882		DRM_ERROR("PCI GART memory not allocated!\n");
883		radeon_do_cleanup_cp(dev);
884		return -EINVAL;
885	}
886
887	dev_priv->usec_timeout = init->usec_timeout;
888	if (dev_priv->usec_timeout < 1 ||
889	    dev_priv->usec_timeout > RADEON_MAX_USEC_TIMEOUT) {
890		DRM_DEBUG("TIMEOUT problem!\n");
891		radeon_do_cleanup_cp(dev);
892		return -EINVAL;
893	}
894
895	/* Enable vblank on CRTC1 for older X servers
896	 */
897	dev_priv->vblank_crtc = DRM_RADEON_VBLANK_CRTC1;
898
899	dev_priv->do_boxes = 0;
900	dev_priv->cp_mode = init->cp_mode;
901
902	/* We don't support anything other than bus-mastering ring mode,
903	 * but the ring can be in either AGP or PCI space for the ring
904	 * read pointer.
905	 */
906	if ((init->cp_mode != RADEON_CSQ_PRIBM_INDDIS) &&
907	    (init->cp_mode != RADEON_CSQ_PRIBM_INDBM)) {
908		DRM_DEBUG("BAD cp_mode (%x)!\n", init->cp_mode);
909		radeon_do_cleanup_cp(dev);
910		return -EINVAL;
911	}
912
913	switch (init->fb_bpp) {
914	case 16:
915		dev_priv->color_fmt = RADEON_COLOR_FORMAT_RGB565;
916		break;
917	case 32:
918	default:
919		dev_priv->color_fmt = RADEON_COLOR_FORMAT_ARGB8888;
920		break;
921	}
922	dev_priv->front_offset = init->front_offset;
923	dev_priv->front_pitch = init->front_pitch;
924	dev_priv->back_offset = init->back_offset;
925	dev_priv->back_pitch = init->back_pitch;
926
927	switch (init->depth_bpp) {
928	case 16:
929		dev_priv->depth_fmt = RADEON_DEPTH_FORMAT_16BIT_INT_Z;
930		break;
931	case 32:
932	default:
933		dev_priv->depth_fmt = RADEON_DEPTH_FORMAT_24BIT_INT_Z;
934		break;
935	}
936	dev_priv->depth_offset = init->depth_offset;
937	dev_priv->depth_pitch = init->depth_pitch;
938
939	/* Hardware state for depth clears.  Remove this if/when we no
940	 * longer clear the depth buffer with a 3D rectangle.  Hard-code
941	 * all values to prevent unwanted 3D state from slipping through
942	 * and screwing with the clear operation.
943	 */
944	dev_priv->depth_clear.rb3d_cntl = (RADEON_PLANE_MASK_ENABLE |
945					   (dev_priv->color_fmt << 10) |
946					   (dev_priv->chip_family < CHIP_R200 ? RADEON_ZBLOCK16 : 0));
947
948	dev_priv->depth_clear.rb3d_zstencilcntl =
949	    (dev_priv->depth_fmt |
950	     RADEON_Z_TEST_ALWAYS |
951	     RADEON_STENCIL_TEST_ALWAYS |
952	     RADEON_STENCIL_S_FAIL_REPLACE |
953	     RADEON_STENCIL_ZPASS_REPLACE |
954	     RADEON_STENCIL_ZFAIL_REPLACE | RADEON_Z_WRITE_ENABLE);
955
956	dev_priv->depth_clear.se_cntl = (RADEON_FFACE_CULL_CW |
957					 RADEON_BFACE_SOLID |
958					 RADEON_FFACE_SOLID |
959					 RADEON_FLAT_SHADE_VTX_LAST |
960					 RADEON_DIFFUSE_SHADE_FLAT |
961					 RADEON_ALPHA_SHADE_FLAT |
962					 RADEON_SPECULAR_SHADE_FLAT |
963					 RADEON_FOG_SHADE_FLAT |
964					 RADEON_VTX_PIX_CENTER_OGL |
965					 RADEON_ROUND_MODE_TRUNC |
966					 RADEON_ROUND_PREC_8TH_PIX);
967
968
969	dev_priv->ring_offset = init->ring_offset;
970	dev_priv->ring_rptr_offset = init->ring_rptr_offset;
971	dev_priv->buffers_offset = init->buffers_offset;
972	dev_priv->gart_textures_offset = init->gart_textures_offset;
973
974	dev_priv->sarea = drm_getsarea(dev);
975	if (!dev_priv->sarea) {
976		DRM_ERROR("could not find sarea!\n");
977		radeon_do_cleanup_cp(dev);
978		return -EINVAL;
979	}
980
981	dev_priv->cp_ring = drm_core_findmap(dev, init->ring_offset);
982	if (!dev_priv->cp_ring) {
983		DRM_ERROR("could not find cp ring region!\n");
984		radeon_do_cleanup_cp(dev);
985		return -EINVAL;
986	}
987	dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset);
988	if (!dev_priv->ring_rptr) {
989		DRM_ERROR("could not find ring read pointer!\n");
990		radeon_do_cleanup_cp(dev);
991		return -EINVAL;
992	}
993	dev->agp_buffer_token = init->buffers_offset;
994	dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
995	if (!dev->agp_buffer_map) {
996		DRM_ERROR("could not find dma buffer region!\n");
997		radeon_do_cleanup_cp(dev);
998		return -EINVAL;
999	}
1000
1001	if (init->gart_textures_offset) {
1002		dev_priv->gart_textures =
1003		    drm_core_findmap(dev, init->gart_textures_offset);
1004		if (!dev_priv->gart_textures) {
1005			DRM_ERROR("could not find GART texture region!\n");
1006			radeon_do_cleanup_cp(dev);
1007			return -EINVAL;
1008		}
1009	}
1010
1011	dev_priv->sarea_priv =
1012	    (drm_radeon_sarea_t *) ((u8 *) dev_priv->sarea->handle +
1013				    init->sarea_priv_offset);
1014
1015#if __OS_HAS_AGP
1016	if (dev_priv->flags & RADEON_IS_AGP) {
1017		drm_core_ioremap(dev_priv->cp_ring, dev);
1018		drm_core_ioremap(dev_priv->ring_rptr, dev);
1019		drm_core_ioremap(dev->agp_buffer_map, dev);
1020		if (!dev_priv->cp_ring->handle ||
1021		    !dev_priv->ring_rptr->handle ||
1022		    !dev->agp_buffer_map->handle) {
1023			DRM_ERROR("could not find ioremap agp regions!\n");
1024			radeon_do_cleanup_cp(dev);
1025			return -EINVAL;
1026		}
1027	} else
1028#endif
1029	{
1030		dev_priv->cp_ring->handle = (void *)dev_priv->cp_ring->offset;
1031		dev_priv->ring_rptr->handle =
1032		    (void *)dev_priv->ring_rptr->offset;
1033		dev->agp_buffer_map->handle =
1034		    (void *)dev->agp_buffer_map->offset;
1035
1036		DRM_DEBUG("dev_priv->cp_ring->handle %p\n",
1037			  dev_priv->cp_ring->handle);
1038		DRM_DEBUG("dev_priv->ring_rptr->handle %p\n",
1039			  dev_priv->ring_rptr->handle);
1040		DRM_DEBUG("dev->agp_buffer_map->handle %p\n",
1041			  dev->agp_buffer_map->handle);
1042	}
1043
1044	dev_priv->fb_location = (radeon_read_fb_location(dev_priv) & 0xffff) << 16;
1045	dev_priv->fb_size =
1046		((radeon_read_fb_location(dev_priv) & 0xffff0000u) + 0x10000)
1047		- dev_priv->fb_location;
1048
1049	dev_priv->front_pitch_offset = (((dev_priv->front_pitch / 64) << 22) |
1050					((dev_priv->front_offset
1051					  + dev_priv->fb_location) >> 10));
1052
1053	dev_priv->back_pitch_offset = (((dev_priv->back_pitch / 64) << 22) |
1054				       ((dev_priv->back_offset
1055					 + dev_priv->fb_location) >> 10));
1056
1057	dev_priv->depth_pitch_offset = (((dev_priv->depth_pitch / 64) << 22) |
1058					((dev_priv->depth_offset
1059					  + dev_priv->fb_location) >> 10));
1060
1061	dev_priv->gart_size = init->gart_size;
1062
1063	/* New let's set the memory map ... */
1064	if (dev_priv->new_memmap) {
1065		u32 base = 0;
1066
1067		DRM_INFO("Setting GART location based on new memory map\n");
1068
1069		/* If using AGP, try to locate the AGP aperture at the same
1070		 * location in the card and on the bus, though we have to
1071		 * align it down.
1072		 */
1073#if __OS_HAS_AGP
1074		if (dev_priv->flags & RADEON_IS_AGP) {
1075			base = dev->agp->base;
1076			/* Check if valid */
1077			if ((base + dev_priv->gart_size - 1) >= dev_priv->fb_location &&
1078			    base < (dev_priv->fb_location + dev_priv->fb_size - 1)) {
1079				DRM_INFO("Can't use AGP base @0x%08lx, won't fit\n",
1080					 dev->agp->base);
1081				base = 0;
1082			}
1083		}
1084#endif
1085		/* If not or if AGP is at 0 (Macs), try to put it elsewhere */
1086		if (base == 0) {
1087			base = dev_priv->fb_location + dev_priv->fb_size;
1088			if (base < dev_priv->fb_location ||
1089			    ((base + dev_priv->gart_size) & 0xfffffffful) < base)
1090				base = dev_priv->fb_location
1091					- dev_priv->gart_size;
1092		}
1093		dev_priv->gart_vm_start = base & 0xffc00000u;
1094		if (dev_priv->gart_vm_start != base)
1095			DRM_INFO("GART aligned down from 0x%08x to 0x%08x\n",
1096				 base, dev_priv->gart_vm_start);
1097	} else {
1098		DRM_INFO("Setting GART location based on old memory map\n");
1099		dev_priv->gart_vm_start = dev_priv->fb_location +
1100			RADEON_READ(RADEON_CONFIG_APER_SIZE);
1101	}
1102
1103#if __OS_HAS_AGP
1104	if (dev_priv->flags & RADEON_IS_AGP)
1105		dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset
1106						 - dev->agp->base
1107						 + dev_priv->gart_vm_start);
1108	else
1109#endif
1110		dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset
1111					- (unsigned long)dev->sg->virtual
1112					+ dev_priv->gart_vm_start);
1113
1114	DRM_DEBUG("dev_priv->gart_size %d\n", dev_priv->gart_size);
1115	DRM_DEBUG("dev_priv->gart_vm_start 0x%x\n", dev_priv->gart_vm_start);
1116	DRM_DEBUG("dev_priv->gart_buffers_offset 0x%lx\n",
1117		  dev_priv->gart_buffers_offset);
1118
1119	dev_priv->ring.start = (u32 *) dev_priv->cp_ring->handle;
1120	dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle
1121			      + init->ring_size / sizeof(u32));
1122	dev_priv->ring.size = init->ring_size;
1123	dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8);
1124
1125	dev_priv->ring.rptr_update = /* init->rptr_update */ 4096;
1126	dev_priv->ring.rptr_update_l2qw = drm_order( /* init->rptr_update */ 4096 / 8);
1127
1128	dev_priv->ring.fetch_size = /* init->fetch_size */ 32;
1129	dev_priv->ring.fetch_size_l2ow = drm_order( /* init->fetch_size */ 32 / 16);
1130
1131	dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
1132
1133	dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK;
1134
1135#if __OS_HAS_AGP
1136	if (dev_priv->flags & RADEON_IS_AGP) {
1137		/* Turn off PCI GART */
1138		radeon_set_pcigart(dev_priv, 0);
1139	} else
1140#endif
1141	{
1142		dev_priv->gart_info.table_mask = DMA_BIT_MASK(32);
1143		/* if we have an offset set from userspace */
1144		if (dev_priv->pcigart_offset_set) {
1145			dev_priv->gart_info.bus_addr =
1146			    dev_priv->pcigart_offset + dev_priv->fb_location;
1147			dev_priv->gart_info.mapping.offset =
1148			    dev_priv->pcigart_offset + dev_priv->fb_aper_offset;
1149			dev_priv->gart_info.mapping.size =
1150			    dev_priv->gart_info.table_size;
1151
1152			drm_core_ioremap_wc(&dev_priv->gart_info.mapping, dev);
1153			dev_priv->gart_info.addr =
1154			    dev_priv->gart_info.mapping.handle;
1155
1156			if (dev_priv->flags & RADEON_IS_PCIE)
1157				dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCIE;
1158			else
1159				dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI;
1160			dev_priv->gart_info.gart_table_location =
1161			    DRM_ATI_GART_FB;
1162
1163			DRM_DEBUG("Setting phys_pci_gart to %p %08lX\n",
1164				  dev_priv->gart_info.addr,
1165				  dev_priv->pcigart_offset);
1166		} else {
1167			if (dev_priv->flags & RADEON_IS_IGPGART)
1168				dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_IGP;
1169			else
1170				dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI;
1171			dev_priv->gart_info.gart_table_location =
1172			    DRM_ATI_GART_MAIN;
1173			dev_priv->gart_info.addr = NULL;
1174			dev_priv->gart_info.bus_addr = 0;
1175			if (dev_priv->flags & RADEON_IS_PCIE) {
1176				DRM_ERROR
1177				    ("Cannot use PCI Express without GART in FB memory\n");
1178				radeon_do_cleanup_cp(dev);
1179				return -EINVAL;
1180			}
1181		}
1182
1183		if (!drm_ati_pcigart_init(dev, &dev_priv->gart_info)) {
1184			DRM_ERROR("failed to init PCI GART!\n");
1185			radeon_do_cleanup_cp(dev);
1186			return -ENOMEM;
1187		}
1188
1189		/* Turn on PCI GART */
1190		radeon_set_pcigart(dev_priv, 1);
1191	}
1192
1193	radeon_cp_load_microcode(dev_priv);
1194	radeon_cp_init_ring_buffer(dev, dev_priv);
1195
1196	dev_priv->last_buf = 0;
1197
1198	radeon_do_engine_reset(dev);
1199	radeon_test_writeback(dev_priv);
1200
1201	return 0;
1202}
1203
/* Tear down everything radeon_do_init_cp() set up: interrupts, the
 * AGP/PCI GART mappings and the GART table itself.  Safe to call on a
 * partially-initialized device (each step checks before freeing), which
 * is why the init path uses it on every error exit.
 */
static int radeon_do_cleanup_cp(struct drm_device * dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	DRM_DEBUG("\n");

	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

#if __OS_HAS_AGP
	/* AGP: undo the three ioremaps done at init time and NULL the map
	 * pointers so a repeated cleanup is a no-op.
	 */
	if (dev_priv->flags & RADEON_IS_AGP) {
		if (dev_priv->cp_ring != NULL) {
			drm_core_ioremapfree(dev_priv->cp_ring, dev);
			dev_priv->cp_ring = NULL;
		}
		if (dev_priv->ring_rptr != NULL) {
			drm_core_ioremapfree(dev_priv->ring_rptr, dev);
			dev_priv->ring_rptr = NULL;
		}
		if (dev->agp_buffer_map != NULL) {
			drm_core_ioremapfree(dev->agp_buffer_map, dev);
			dev->agp_buffer_map = NULL;
		}
	} else
#endif
	{

		/* PCI(E)/IGP: disable the GART before releasing its table. */
		if (dev_priv->gart_info.bus_addr) {
			/* Turn off PCI GART */
			radeon_set_pcigart(dev_priv, 0);
			if (!drm_ati_pcigart_cleanup(dev, &dev_priv->gart_info))
				DRM_ERROR("failed to cleanup PCI GART!\n");
		}

		/* A GART table kept in framebuffer memory was ioremapped at
		 * init; release that mapping too.
		 */
		if (dev_priv->gart_info.gart_table_location == DRM_ATI_GART_FB)
		{
			drm_core_ioremapfree(&dev_priv->gart_info.mapping, dev);
			/* NOTE(review): addr is a pointer; 0 serves as NULL here. */
			dev_priv->gart_info.addr = 0;
		}
	}
	/* only clear to the start of flags */
	memset(dev_priv, 0, offsetof(drm_radeon_private_t, flags));

	return 0;
}
1252
1253/* This code will reinit the Radeon CP hardware after a resume from disc.
1254 * AFAIK, it would be very difficult to pickle the state at suspend time, so
1255 * here we make sure that all Radeon hardware initialisation is re-done without
1256 * affecting running applications.
1257 *
1258 * Charl P. Botha <http://cpbotha.net>
1259 */
static int radeon_do_resume_cp(struct drm_device * dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;

	/* Resume is meaningless before CP_INIT has populated dev_private. */
	if (!dev_priv) {
		DRM_ERROR("Called with no initialization\n");
		return -EINVAL;
	}

	DRM_DEBUG("Starting radeon_do_resume_cp()\n");

#if __OS_HAS_AGP
	if (dev_priv->flags & RADEON_IS_AGP) {
		/* Turn off PCI GART */
		radeon_set_pcigart(dev_priv, 0);
	} else
#endif
	{
		/* Turn on PCI GART */
		radeon_set_pcigart(dev_priv, 1);
	}

	/* Redo the init-time hardware setup: microcode, ring buffer, engine
	 * reset, then re-enable the software interrupt used for fences.
	 */
	radeon_cp_load_microcode(dev_priv);
	radeon_cp_init_ring_buffer(dev, dev_priv);

	radeon_do_engine_reset(dev);
	radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1);

	DRM_DEBUG("radeon_do_resume_cp() complete\n");

	return 0;
}
1292
1293int radeon_cp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
1294{
1295	drm_radeon_init_t *init = data;
1296
1297	LOCK_TEST_WITH_RETURN(dev, file_priv);
1298
1299	if (init->func == RADEON_INIT_R300_CP)
1300		r300_init_reg_flags(dev);
1301
1302	switch (init->func) {
1303	case RADEON_INIT_CP:
1304	case RADEON_INIT_R200_CP:
1305	case RADEON_INIT_R300_CP:
1306		return radeon_do_init_cp(dev, init);
1307	case RADEON_CLEANUP_CP:
1308		return radeon_do_cleanup_cp(dev);
1309	}
1310
1311	return -EINVAL;
1312}
1313
1314int radeon_cp_start(struct drm_device *dev, void *data, struct drm_file *file_priv)
1315{
1316	drm_radeon_private_t *dev_priv = dev->dev_private;
1317	DRM_DEBUG("\n");
1318
1319	LOCK_TEST_WITH_RETURN(dev, file_priv);
1320
1321	if (dev_priv->cp_running) {
1322		DRM_DEBUG("while CP running\n");
1323		return 0;
1324	}
1325	if (dev_priv->cp_mode == RADEON_CSQ_PRIDIS_INDDIS) {
1326		DRM_DEBUG("called with bogus CP mode (%d)\n",
1327			  dev_priv->cp_mode);
1328		return 0;
1329	}
1330
1331	radeon_do_cp_start(dev_priv);
1332
1333	return 0;
1334}
1335
1336/* Stop the CP.  The engine must have been idled before calling this
1337 * routine.
1338 */
int radeon_cp_stop(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_cp_stop_t *stop = data;
	int ret;
	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	/* Stopping a CP that never started is a no-op. */
	if (!dev_priv->cp_running)
		return 0;

	/* Flush any pending CP commands.  This ensures any outstanding
	 * commands are executed by the engine before we turn it off.
	 */
	if (stop->flush) {
		radeon_do_cp_flush(dev_priv);
	}

	/* If we fail to make the engine go idle, we return an error
	 * code so that the DRM ioctl wrapper can try again.
	 */
	if (stop->idle) {
		ret = radeon_do_cp_idle(dev_priv);
		if (ret)
			return ret;
	}

	/* Finally, we can turn off the CP.  If the engine isn't idle,
	 * we will get some dropped triangles as they won't be fully
	 * rendered before the CP is shut down.
	 */
	radeon_do_cp_stop(dev_priv);

	/* Reset the engine */
	radeon_do_engine_reset(dev);

	return 0;
}
1378
/* Final teardown on device release: idle and stop the CP, mask all
 * interrupts, clear the surface registers, free the memory heaps and
 * then run the normal cleanup path.
 */
void radeon_do_release(struct drm_device * dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	int i, ret;

	if (dev_priv) {
		if (dev_priv->cp_running) {
			/* Stop the cp */
			/* Busy-wait (with a 1-tick sleep per retry) until the
			 * engine reports idle; the sleep primitive depends on
			 * the host OS.
			 */
			while ((ret = radeon_do_cp_idle(dev_priv)) != 0) {
				DRM_DEBUG("radeon_do_cp_idle %d\n", ret);
#ifdef __linux__
				schedule();
#else
#if defined(__FreeBSD__) && __FreeBSD_version > 500000
				mtx_sleep(&ret, &dev->dev_lock, PZERO, "rdnrel",
				       1);
#else
				tsleep(&ret, PZERO, "rdnrel", 1);
#endif
#endif
			}
			radeon_do_cp_stop(dev_priv);
			radeon_do_engine_reset(dev);
		}

		/* Disable *all* interrupts */
		if (dev_priv->mmio)	/* remove this after permanent addmaps */
			RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);

		if (dev_priv->mmio) {	/* remove all surfaces */
			/* Each surface occupies a 16-byte register stride:
			 * info, lower bound, upper bound.
			 */
			for (i = 0; i < RADEON_MAX_SURFACES; i++) {
				RADEON_WRITE(RADEON_SURFACE0_INFO + 16 * i, 0);
				RADEON_WRITE(RADEON_SURFACE0_LOWER_BOUND +
					     16 * i, 0);
				RADEON_WRITE(RADEON_SURFACE0_UPPER_BOUND +
					     16 * i, 0);
			}
		}

		/* Free memory heap structures */
		radeon_mem_takedown(&(dev_priv->gart_heap));
		radeon_mem_takedown(&(dev_priv->fb_heap));

		/* deallocate kernel resources */
		radeon_do_cleanup_cp(dev);
	}
}
1426
1427/* Just reset the CP ring.  Called as part of an X Server engine reset.
1428 */
1429int radeon_cp_reset(struct drm_device *dev, void *data, struct drm_file *file_priv)
1430{
1431	drm_radeon_private_t *dev_priv = dev->dev_private;
1432	DRM_DEBUG("\n");
1433
1434	LOCK_TEST_WITH_RETURN(dev, file_priv);
1435
1436	if (!dev_priv) {
1437		DRM_DEBUG("called before init done\n");
1438		return -EINVAL;
1439	}
1440
1441	radeon_do_cp_reset(dev_priv);
1442
1443	/* The CP is no longer running after an engine reset */
1444	dev_priv->cp_running = 0;
1445
1446	return 0;
1447}
1448
1449int radeon_cp_idle(struct drm_device *dev, void *data, struct drm_file *file_priv)
1450{
1451	drm_radeon_private_t *dev_priv = dev->dev_private;
1452	DRM_DEBUG("\n");
1453
1454	LOCK_TEST_WITH_RETURN(dev, file_priv);
1455
1456	return radeon_do_cp_idle(dev_priv);
1457}
1458
1459/* Added by Charl P. Botha to call radeon_do_resume_cp().
1460 */
/* DRM_RADEON_CP_RESUME ioctl: thin wrapper around radeon_do_resume_cp();
 * data and file_priv are unused.
 */
int radeon_cp_resume(struct drm_device *dev, void *data, struct drm_file *file_priv)
{

	return radeon_do_resume_cp(dev);
}
1466
/* DRM_RADEON_RESET ioctl: full engine reset; requires the HW lock. */
int radeon_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	return radeon_do_engine_reset(dev);
}
1475
1476/* ================================================================
1477 * Fullscreen mode
1478 */
1479
1480/* KW: Deprecated to say the least:
1481 */
/* Deprecated fullscreen ioctl; kept only so old clients get success. */
int radeon_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	return 0;
}
1486
1487/* ================================================================
1488 * Freelist management
1489 */
1490
1491/* Original comment: FIXME: ROTATE_BUFS is a hack to cycle through
1492 *   bufs until freelist code is used.  Note this hides a problem with
1493 *   the scratch register * (used to keep track of last buffer
1494 *   completed) being written to before * the last buffer has actually
1495 *   completed rendering.
1496 *
1497 * KW:  It's also a good way to find free buffers quickly.
1498 *
1499 * KW: Ideally this loop wouldn't exist, and freelist_get wouldn't
1500 * sleep.  However, bugs in older versions of radeon_accel.c mean that
1501 * we essentially have to do this, else old clients will break.
1502 *
1503 * However, it does leave open a potential deadlock where all the
1504 * buffers are held by other clients, which can't release them because
1505 * they can't get the lock.
1506 */
1507
/* Find a DMA buffer that is free (never handed to a client) or whose
 * pending work has completed according to the age scratch register.
 * Scans round-robin starting after the last buffer returned, polling
 * for up to usec_timeout microseconds.  Returns NULL on timeout.
 */
struct drm_buf *radeon_freelist_get(struct drm_device * dev)
{
	struct drm_device_dma *dma = dev->dma;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_buf_priv_t *buf_priv;
	struct drm_buf *buf;
	int i, t;
	int start;

	/* Advance the round-robin cursor, wrapping at buf_count. */
	if (++dev_priv->last_buf >= dma->buf_count)
		dev_priv->last_buf = 0;

	start = dev_priv->last_buf;

	for (t = 0; t < dev_priv->usec_timeout; t++) {
		/* Age of the most recently completed buffer, written by the
		 * CP to scratch register 1.
		 */
		u32 done_age = GET_SCRATCH(1);
		DRM_DEBUG("done_age = %d\n", done_age);
		for (i = start; i < dma->buf_count; i++) {
			buf = dma->buflist[i];
			buf_priv = buf->dev_private;
			/* A buffer is usable if unowned, or if its queued work
			 * has aged out (age <= done_age).
			 */
			if (buf->file_priv == NULL || (buf->pending &&
						       buf_priv->age <=
						       done_age)) {
				dev_priv->stats.requested_bufs++;
				buf->pending = 0;
				return buf;
			}
			/* NOTE(review): assigned on every inner iteration; the
			 * effect is that subsequent outer passes scan from
			 * index 0.  Hoisting it after the loop would behave
			 * the same here since start < buf_count always holds.
			 */
			start = 0;
		}

		if (t) {
			DRM_UDELAY(1);
			dev_priv->stats.freelist_loops++;
		}
	}

	DRM_DEBUG("returning NULL!\n");
	return NULL;
}
1547
#if 0
/* Disabled alternative implementation of radeon_freelist_get(): samples
 * the done age once via the ring read pointer map and makes at most two
 * passes instead of polling for usec_timeout.  Dead code, kept for
 * reference; the live version above is compiled.
 */
struct drm_buf *radeon_freelist_get(struct drm_device * dev)
{
	struct drm_device_dma *dma = dev->dma;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_buf_priv_t *buf_priv;
	struct drm_buf *buf;
	int i, t;
	int start;
	u32 done_age = DRM_READ32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(1));

	if (++dev_priv->last_buf >= dma->buf_count)
		dev_priv->last_buf = 0;

	start = dev_priv->last_buf;
	dev_priv->stats.freelist_loops++;

	for (t = 0; t < 2; t++) {
		for (i = start; i < dma->buf_count; i++) {
			buf = dma->buflist[i];
			buf_priv = buf->dev_private;
			if (buf->file_priv == 0 || (buf->pending &&
						    buf_priv->age <=
						    done_age)) {
				dev_priv->stats.requested_bufs++;
				buf->pending = 0;
				return buf;
			}
		}
		start = 0;
	}

	return NULL;
}
#endif
1583
1584void radeon_freelist_reset(struct drm_device * dev)
1585{
1586	struct drm_device_dma *dma = dev->dma;
1587	drm_radeon_private_t *dev_priv = dev->dev_private;
1588	int i;
1589
1590	dev_priv->last_buf = 0;
1591	for (i = 0; i < dma->buf_count; i++) {
1592		struct drm_buf *buf = dma->buflist[i];
1593		drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
1594		buf_priv->age = 0;
1595	}
1596}
1597
1598/* ================================================================
1599 * CP command submission
1600 */
1601
/* Wait until the ring buffer has more than n bytes of free space.
 * Returns 0 on success, -EBUSY if the head makes no progress for
 * usec_timeout consecutive microseconds.
 */
int radeon_wait_ring(drm_radeon_private_t * dev_priv, int n)
{
	drm_radeon_ring_buffer_t *ring = &dev_priv->ring;
	int i;
	u32 last_head = GET_RING_HEAD(dev_priv);

	for (i = 0; i < dev_priv->usec_timeout; i++) {
		u32 head = GET_RING_HEAD(dev_priv);

		/* Free space is head-to-tail distance, modulo ring size. */
		ring->space = (head - ring->tail) * sizeof(u32);
		if (ring->space <= 0)
			ring->space += ring->size;
		if (ring->space > n)
			return 0;

		dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;

		/* Restart the timeout whenever the CP is making progress, so
		 * we only give up on a genuinely stalled head.
		 */
		if (head != last_head)
			i = 0;
		last_head = head;

		DRM_UDELAY(1);
	}

	/* FIXME: This return value is ignored in the BEGIN_RING macro! */
#if RADEON_FIFO_DEBUG
	radeon_status(dev_priv);
	DRM_ERROR("failed!\n");
#endif
	return -EBUSY;
}
1633
1634static int radeon_cp_get_buffers(struct drm_device *dev,
1635				 struct drm_file *file_priv,
1636				 struct drm_dma * d)
1637{
1638	int i;
1639	struct drm_buf *buf;
1640
1641	for (i = d->granted_count; i < d->request_count; i++) {
1642		buf = radeon_freelist_get(dev);
1643		if (!buf)
1644			return -EBUSY;	/* NOTE: broken client */
1645
1646		buf->file_priv = file_priv;
1647
1648		if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx,
1649				     sizeof(buf->idx)))
1650			return -EFAULT;
1651		if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total,
1652				     sizeof(buf->total)))
1653			return -EFAULT;
1654
1655		d->granted_count++;
1656	}
1657	return 0;
1658}
1659
1660int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv)
1661{
1662	struct drm_device_dma *dma = dev->dma;
1663	int ret = 0;
1664	struct drm_dma *d = data;
1665
1666	LOCK_TEST_WITH_RETURN(dev, file_priv);
1667
1668	/* Please don't send us buffers.
1669	 */
1670	if (d->send_count != 0) {
1671		DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
1672			  DRM_CURRENTPID, d->send_count);
1673		return -EINVAL;
1674	}
1675
1676	/* We'll send you buffers.
1677	 */
1678	if (d->request_count < 0 || d->request_count > dma->buf_count) {
1679		DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
1680			  DRM_CURRENTPID, d->request_count, dma->buf_count);
1681		return -EINVAL;
1682	}
1683
1684	d->granted_count = 0;
1685
1686	if (d->request_count) {
1687		ret = radeon_cp_get_buffers(dev, file_priv, d);
1688	}
1689
1690	return ret;
1691}
1692
/* Driver load hook: allocate and zero the per-device private structure,
 * record the chip family and bus type from the probe flags.
 */
int radeon_driver_load(struct drm_device *dev, unsigned long flags)
{
	drm_radeon_private_t *dev_priv;
	int ret = 0;

	dev_priv = drm_alloc(sizeof(drm_radeon_private_t), DRM_MEM_DRIVER);
	if (dev_priv == NULL)
		return -ENOMEM;

	memset(dev_priv, 0, sizeof(drm_radeon_private_t));
	dev->dev_private = (void *)dev_priv;
	dev_priv->flags = flags;

	/* Chips that support hierarchical Z buffering. */
	switch (flags & RADEON_FAMILY_MASK) {
	case CHIP_R100:
	case CHIP_RV200:
	case CHIP_R200:
	case CHIP_R300:
	case CHIP_R350:
	case CHIP_R420:
	case CHIP_RV410:
	case CHIP_RV515:
	case CHIP_R520:
	case CHIP_RV570:
	case CHIP_R580:
		dev_priv->flags |= RADEON_HAS_HIERZ;
		break;
	default:
		/* all other chips have no hierarchical z buffer */
		break;
	}

	/* Record the bus type; the AGP/PCIE/PCI flag drives the GART and
	 * ring-mapping choices made at CP init time.
	 */
	dev_priv->chip_family = flags & RADEON_FAMILY_MASK;
	if (drm_device_is_agp(dev))
		dev_priv->flags |= RADEON_IS_AGP;
	else if (drm_device_is_pcie(dev))
		dev_priv->flags |= RADEON_IS_PCIE;
	else
		dev_priv->flags |= RADEON_IS_PCI;

	DRM_DEBUG("%s card detected\n",
		  ((dev_priv->flags & RADEON_IS_AGP) ? "AGP" : (((dev_priv->flags & RADEON_IS_PCIE) ? "PCIE" : "PCI"))));
	return ret;
}
1737
1738/* Create mappings for registers and framebuffer so userland doesn't necessarily
1739 * have to find them.
1740 */
int radeon_driver_firstopen(struct drm_device *dev)
{
	int ret;
	drm_local_map_t *map;
	drm_radeon_private_t *dev_priv = dev->dev_private;

	dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE;

	/* Map the register BAR (resource 2) read-only; the handle is kept
	 * in dev_priv->mmio for the RADEON_READ/WRITE macros.
	 */
	ret = drm_addmap(dev, drm_get_resource_start(dev, 2),
			 drm_get_resource_len(dev, 2), _DRM_REGISTERS,
			 _DRM_READ_ONLY, &dev_priv->mmio);
	if (ret != 0)
		return ret;

	/* Map the framebuffer aperture (resource 0) write-combined.  The
	 * map is tracked by the DRM core; the local 'map' handle is
	 * intentionally not stored here.
	 */
	dev_priv->fb_aper_offset = drm_get_resource_start(dev, 0);
	ret = drm_addmap(dev, dev_priv->fb_aper_offset,
			 drm_get_resource_len(dev, 0), _DRM_FRAME_BUFFER,
			 _DRM_WRITE_COMBINING, &map);
	if (ret != 0)
		return ret;

	return 0;
}
1764
/* Driver unload hook: free the private state allocated in
 * radeon_driver_load() and clear the back-pointer.
 */
int radeon_driver_unload(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;

	DRM_DEBUG("\n");
	drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);

	dev->dev_private = NULL;
	return 0;
}
1775