/* radeon_cp.c revision 189130 */
1/* radeon_cp.c -- CP support for Radeon -*- linux-c -*- */ 2/*- 3 * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas. 4 * Copyright 2000 VA Linux Systems, Inc., Fremont, California. 5 * Copyright 2007 Advanced Micro Devices, Inc. 6 * All Rights Reserved. 7 * 8 * Permission is hereby granted, free of charge, to any person obtaining a 9 * copy of this software and associated documentation files (the "Software"), 10 * to deal in the Software without restriction, including without limitation 11 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 12 * and/or sell copies of the Software, and to permit persons to whom the 13 * Software is furnished to do so, subject to the following conditions: 14 * 15 * The above copyright notice and this permission notice (including the next 16 * paragraph) shall be included in all copies or substantial portions of the 17 * Software. 18 * 19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 22 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR 23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 25 * DEALINGS IN THE SOFTWARE. 26 * 27 * Authors: 28 * Kevin E. 
Martin <martin@valinux.com> 29 * Gareth Hughes <gareth@valinux.com> 30 */ 31 32#include <sys/cdefs.h> 33__FBSDID("$FreeBSD: head/sys/dev/drm/radeon_cp.c 189130 2009-02-28 02:37:55Z rnoland $"); 34 35#include "dev/drm/drmP.h" 36#include "dev/drm/drm.h" 37#include "dev/drm/radeon_drm.h" 38#include "dev/drm/radeon_drv.h" 39#include "dev/drm/r300_reg.h" 40 41#include "dev/drm/radeon_microcode.h" 42#define RADEON_FIFO_DEBUG 0 43 44static int radeon_do_cleanup_cp(struct drm_device * dev); 45static void radeon_do_cp_start(drm_radeon_private_t * dev_priv); 46 47static u32 R500_READ_MCIND(drm_radeon_private_t *dev_priv, int addr) 48{ 49 u32 ret; 50 RADEON_WRITE(R520_MC_IND_INDEX, 0x7f0000 | (addr & 0xff)); 51 ret = RADEON_READ(R520_MC_IND_DATA); 52 RADEON_WRITE(R520_MC_IND_INDEX, 0); 53 return ret; 54} 55 56static u32 RS480_READ_MCIND(drm_radeon_private_t *dev_priv, int addr) 57{ 58 u32 ret; 59 RADEON_WRITE(RS480_NB_MC_INDEX, addr & 0xff); 60 ret = RADEON_READ(RS480_NB_MC_DATA); 61 RADEON_WRITE(RS480_NB_MC_INDEX, 0xff); 62 return ret; 63} 64 65static u32 RS690_READ_MCIND(drm_radeon_private_t *dev_priv, int addr) 66{ 67 u32 ret; 68 RADEON_WRITE(RS690_MC_INDEX, (addr & RS690_MC_INDEX_MASK)); 69 ret = RADEON_READ(RS690_MC_DATA); 70 RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_MASK); 71 return ret; 72} 73 74static u32 IGP_READ_MCIND(drm_radeon_private_t *dev_priv, int addr) 75{ 76 if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || 77 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) 78 return RS690_READ_MCIND(dev_priv, addr); 79 else 80 return RS480_READ_MCIND(dev_priv, addr); 81} 82 83u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv) 84{ 85 86 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) 87 return R500_READ_MCIND(dev_priv, RV515_MC_FB_LOCATION); 88 else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || 89 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) 90 return RS690_READ_MCIND(dev_priv, RS690_MC_FB_LOCATION); 91 else 
if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) 92 return R500_READ_MCIND(dev_priv, R520_MC_FB_LOCATION); 93 else 94 return RADEON_READ(RADEON_MC_FB_LOCATION); 95} 96 97static void radeon_write_fb_location(drm_radeon_private_t *dev_priv, u32 fb_loc) 98{ 99 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) 100 R500_WRITE_MCIND(RV515_MC_FB_LOCATION, fb_loc); 101 else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || 102 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) 103 RS690_WRITE_MCIND(RS690_MC_FB_LOCATION, fb_loc); 104 else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) 105 R500_WRITE_MCIND(R520_MC_FB_LOCATION, fb_loc); 106 else 107 RADEON_WRITE(RADEON_MC_FB_LOCATION, fb_loc); 108} 109 110static void radeon_write_agp_location(drm_radeon_private_t *dev_priv, u32 agp_loc) 111{ 112 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) 113 R500_WRITE_MCIND(RV515_MC_AGP_LOCATION, agp_loc); 114 else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || 115 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) 116 RS690_WRITE_MCIND(RS690_MC_AGP_LOCATION, agp_loc); 117 else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) 118 R500_WRITE_MCIND(R520_MC_AGP_LOCATION, agp_loc); 119 else 120 RADEON_WRITE(RADEON_MC_AGP_LOCATION, agp_loc); 121} 122 123static void radeon_write_agp_base(drm_radeon_private_t *dev_priv, u64 agp_base) 124{ 125 u32 agp_base_hi = upper_32_bits(agp_base); 126 u32 agp_base_lo = agp_base & 0xffffffff; 127 128 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) { 129 R500_WRITE_MCIND(RV515_MC_AGP_BASE, agp_base_lo); 130 R500_WRITE_MCIND(RV515_MC_AGP_BASE_2, agp_base_hi); 131 } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || 132 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) { 133 RS690_WRITE_MCIND(RS690_MC_AGP_BASE, agp_base_lo); 134 RS690_WRITE_MCIND(RS690_MC_AGP_BASE_2, agp_base_hi); 135 } else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) { 
136 R500_WRITE_MCIND(R520_MC_AGP_BASE, agp_base_lo); 137 R500_WRITE_MCIND(R520_MC_AGP_BASE_2, agp_base_hi); 138 } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) || 139 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) { 140 RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo); 141 RADEON_WRITE(RS480_AGP_BASE_2, agp_base_hi); 142 } else { 143 RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo); 144 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R200) 145 RADEON_WRITE(RADEON_AGP_BASE_2, agp_base_hi); 146 } 147} 148 149static int RADEON_READ_PLL(struct drm_device * dev, int addr) 150{ 151 drm_radeon_private_t *dev_priv = dev->dev_private; 152 153 RADEON_WRITE8(RADEON_CLOCK_CNTL_INDEX, addr & 0x1f); 154 return RADEON_READ(RADEON_CLOCK_CNTL_DATA); 155} 156 157static u32 RADEON_READ_PCIE(drm_radeon_private_t *dev_priv, int addr) 158{ 159 RADEON_WRITE8(RADEON_PCIE_INDEX, addr & 0xff); 160 return RADEON_READ(RADEON_PCIE_DATA); 161} 162 163#if RADEON_FIFO_DEBUG 164static void radeon_status(drm_radeon_private_t * dev_priv) 165{ 166 printk("%s:\n", __FUNCTION__); 167 printk("RBBM_STATUS = 0x%08x\n", 168 (unsigned int)RADEON_READ(RADEON_RBBM_STATUS)); 169 printk("CP_RB_RTPR = 0x%08x\n", 170 (unsigned int)RADEON_READ(RADEON_CP_RB_RPTR)); 171 printk("CP_RB_WTPR = 0x%08x\n", 172 (unsigned int)RADEON_READ(RADEON_CP_RB_WPTR)); 173 printk("AIC_CNTL = 0x%08x\n", 174 (unsigned int)RADEON_READ(RADEON_AIC_CNTL)); 175 printk("AIC_STAT = 0x%08x\n", 176 (unsigned int)RADEON_READ(RADEON_AIC_STAT)); 177 printk("AIC_PT_BASE = 0x%08x\n", 178 (unsigned int)RADEON_READ(RADEON_AIC_PT_BASE)); 179 printk("TLB_ADDR = 0x%08x\n", 180 (unsigned int)RADEON_READ(RADEON_AIC_TLB_ADDR)); 181 printk("TLB_DATA = 0x%08x\n", 182 (unsigned int)RADEON_READ(RADEON_AIC_TLB_DATA)); 183} 184#endif 185 186/* ================================================================ 187 * Engine, FIFO control 188 */ 189 190static int radeon_do_pixcache_flush(drm_radeon_private_t * dev_priv) 191{ 192 u32 tmp; 193 int i; 
194 195 dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; 196 197 if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { 198 tmp = RADEON_READ(RADEON_RB3D_DSTCACHE_CTLSTAT); 199 tmp |= RADEON_RB3D_DC_FLUSH_ALL; 200 RADEON_WRITE(RADEON_RB3D_DSTCACHE_CTLSTAT, tmp); 201 202 for (i = 0; i < dev_priv->usec_timeout; i++) { 203 if (!(RADEON_READ(RADEON_RB3D_DSTCACHE_CTLSTAT) 204 & RADEON_RB3D_DC_BUSY)) { 205 return 0; 206 } 207 DRM_UDELAY(1); 208 } 209 } else { 210 /* don't flush or purge cache here or lockup */ 211 return 0; 212 } 213 214#if RADEON_FIFO_DEBUG 215 DRM_ERROR("failed!\n"); 216 radeon_status(dev_priv); 217#endif 218 return -EBUSY; 219} 220 221static int radeon_do_wait_for_fifo(drm_radeon_private_t * dev_priv, int entries) 222{ 223 int i; 224 225 dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; 226 227 for (i = 0; i < dev_priv->usec_timeout; i++) { 228 int slots = (RADEON_READ(RADEON_RBBM_STATUS) 229 & RADEON_RBBM_FIFOCNT_MASK); 230 if (slots >= entries) 231 return 0; 232 DRM_UDELAY(1); 233 } 234 DRM_INFO("wait for fifo failed status : 0x%08X 0x%08X\n", 235 RADEON_READ(RADEON_RBBM_STATUS), 236 RADEON_READ(R300_VAP_CNTL_STATUS)); 237 238#if RADEON_FIFO_DEBUG 239 DRM_ERROR("failed!\n"); 240 radeon_status(dev_priv); 241#endif 242 return -EBUSY; 243} 244 245static int radeon_do_wait_for_idle(drm_radeon_private_t * dev_priv) 246{ 247 int i, ret; 248 249 dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; 250 251 ret = radeon_do_wait_for_fifo(dev_priv, 64); 252 if (ret) 253 return ret; 254 255 for (i = 0; i < dev_priv->usec_timeout; i++) { 256 if (!(RADEON_READ(RADEON_RBBM_STATUS) 257 & RADEON_RBBM_ACTIVE)) { 258 radeon_do_pixcache_flush(dev_priv); 259 return 0; 260 } 261 DRM_UDELAY(1); 262 } 263 DRM_INFO("wait idle failed status : 0x%08X 0x%08X\n", 264 RADEON_READ(RADEON_RBBM_STATUS), 265 RADEON_READ(R300_VAP_CNTL_STATUS)); 266 267#if RADEON_FIFO_DEBUG 268 DRM_ERROR("failed!\n"); 269 radeon_status(dev_priv); 270#endif 271 return -EBUSY; 272} 273 274static void 
radeon_init_pipes(drm_radeon_private_t * dev_priv) 275{ 276 uint32_t gb_tile_config, gb_pipe_sel = 0; 277 278 /* RS4xx/RS6xx/R4xx/R5xx */ 279 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R420) { 280 gb_pipe_sel = RADEON_READ(R400_GB_PIPE_SELECT); 281 dev_priv->num_gb_pipes = ((gb_pipe_sel >> 12) & 0x3) + 1; 282 } else { 283 /* R3xx */ 284 if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300) || 285 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350)) { 286 dev_priv->num_gb_pipes = 2; 287 } else { 288 /* R3Vxx */ 289 dev_priv->num_gb_pipes = 1; 290 } 291 } 292 DRM_INFO("Num pipes: %d\n", dev_priv->num_gb_pipes); 293 294 gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16 /*| R300_SUBPIXEL_1_16*/); 295 296 switch(dev_priv->num_gb_pipes) { 297 case 2: gb_tile_config |= R300_PIPE_COUNT_R300; break; 298 case 3: gb_tile_config |= R300_PIPE_COUNT_R420_3P; break; 299 case 4: gb_tile_config |= R300_PIPE_COUNT_R420; break; 300 default: 301 case 1: gb_tile_config |= R300_PIPE_COUNT_RV350; break; 302 } 303 304 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) { 305 RADEON_WRITE_PLL(R500_DYN_SCLK_PWMEM_PIPE, (1 | ((gb_pipe_sel >> 8) & 0xf) << 4)); 306 RADEON_WRITE(R500_SU_REG_DEST, ((1 << dev_priv->num_gb_pipes) - 1)); 307 } 308 RADEON_WRITE(R300_GB_TILE_CONFIG, gb_tile_config); 309 radeon_do_wait_for_idle(dev_priv); 310 RADEON_WRITE(R300_DST_PIPE_CONFIG, RADEON_READ(R300_DST_PIPE_CONFIG) | R300_PIPE_AUTO_CONFIG); 311 RADEON_WRITE(R300_RB2D_DSTCACHE_MODE, (RADEON_READ(R300_RB2D_DSTCACHE_MODE) | 312 R300_DC_AUTOFLUSH_ENABLE | 313 R300_DC_DC_DISABLE_IGNORE_PE)); 314 315 316} 317 318/* ================================================================ 319 * CP control, initialization 320 */ 321 322/* Load the microcode for the CP */ 323static void radeon_cp_load_microcode(drm_radeon_private_t * dev_priv) 324{ 325 int i; 326 DRM_DEBUG("\n"); 327 328 radeon_do_wait_for_idle(dev_priv); 329 330 RADEON_WRITE(RADEON_CP_ME_RAM_ADDR, 0); 331 332 if 
(((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R100) || 333 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV100) || 334 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV200) || 335 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS100) || 336 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS200)) { 337 DRM_INFO("Loading R100 Microcode\n"); 338 for (i = 0; i < 256; i++) { 339 RADEON_WRITE(RADEON_CP_ME_RAM_DATAH, 340 R100_cp_microcode[i][1]); 341 RADEON_WRITE(RADEON_CP_ME_RAM_DATAL, 342 R100_cp_microcode[i][0]); 343 } 344 } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R200) || 345 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV250) || 346 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV280) || 347 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS300)) { 348 DRM_INFO("Loading R200 Microcode\n"); 349 for (i = 0; i < 256; i++) { 350 RADEON_WRITE(RADEON_CP_ME_RAM_DATAH, 351 R200_cp_microcode[i][1]); 352 RADEON_WRITE(RADEON_CP_ME_RAM_DATAL, 353 R200_cp_microcode[i][0]); 354 } 355 } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300) || 356 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350) || 357 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV350) || 358 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV380) || 359 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) || 360 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) { 361 DRM_INFO("Loading R300 Microcode\n"); 362 for (i = 0; i < 256; i++) { 363 RADEON_WRITE(RADEON_CP_ME_RAM_DATAH, 364 R300_cp_microcode[i][1]); 365 RADEON_WRITE(RADEON_CP_ME_RAM_DATAL, 366 R300_cp_microcode[i][0]); 367 } 368 } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R420) || 369 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R423) || 370 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV410)) { 371 DRM_INFO("Loading R400 Microcode\n"); 372 for (i = 0; i < 256; i++) { 373 RADEON_WRITE(RADEON_CP_ME_RAM_DATAH, 374 R420_cp_microcode[i][1]); 375 RADEON_WRITE(RADEON_CP_ME_RAM_DATAL, 376 
R420_cp_microcode[i][0]); 377 } 378 } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || 379 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) { 380 DRM_INFO("Loading RS690/RS740 Microcode\n"); 381 for (i = 0; i < 256; i++) { 382 RADEON_WRITE(RADEON_CP_ME_RAM_DATAH, 383 RS690_cp_microcode[i][1]); 384 RADEON_WRITE(RADEON_CP_ME_RAM_DATAL, 385 RS690_cp_microcode[i][0]); 386 } 387 } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) || 388 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R520) || 389 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530) || 390 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R580) || 391 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV560) || 392 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV570)) { 393 DRM_INFO("Loading R500 Microcode\n"); 394 for (i = 0; i < 256; i++) { 395 RADEON_WRITE(RADEON_CP_ME_RAM_DATAH, 396 R520_cp_microcode[i][1]); 397 RADEON_WRITE(RADEON_CP_ME_RAM_DATAL, 398 R520_cp_microcode[i][0]); 399 } 400 } 401} 402 403/* Flush any pending commands to the CP. This should only be used just 404 * prior to a wait for idle, as it informs the engine that the command 405 * stream is ending. 406 */ 407static void radeon_do_cp_flush(drm_radeon_private_t * dev_priv) 408{ 409 DRM_DEBUG("\n"); 410#if 0 411 u32 tmp; 412 413 tmp = RADEON_READ(RADEON_CP_RB_WPTR) | (1 << 31); 414 RADEON_WRITE(RADEON_CP_RB_WPTR, tmp); 415#endif 416} 417 418/* Wait for the CP to go idle. 419 */ 420int radeon_do_cp_idle(drm_radeon_private_t * dev_priv) 421{ 422 RING_LOCALS; 423 DRM_DEBUG("\n"); 424 425 BEGIN_RING(6); 426 427 RADEON_PURGE_CACHE(); 428 RADEON_PURGE_ZCACHE(); 429 RADEON_WAIT_UNTIL_IDLE(); 430 431 ADVANCE_RING(); 432 COMMIT_RING(); 433 434 return radeon_do_wait_for_idle(dev_priv); 435} 436 437/* Start the Command Processor. 
438 */ 439static void radeon_do_cp_start(drm_radeon_private_t * dev_priv) 440{ 441 RING_LOCALS; 442 DRM_DEBUG("\n"); 443 444 radeon_do_wait_for_idle(dev_priv); 445 446 RADEON_WRITE(RADEON_CP_CSQ_CNTL, dev_priv->cp_mode); 447 448 dev_priv->cp_running = 1; 449 450 BEGIN_RING(8); 451 /* isync can only be written through cp on r5xx write it here */ 452 OUT_RING(CP_PACKET0(RADEON_ISYNC_CNTL, 0)); 453 OUT_RING(RADEON_ISYNC_ANY2D_IDLE3D | 454 RADEON_ISYNC_ANY3D_IDLE2D | 455 RADEON_ISYNC_WAIT_IDLEGUI | 456 RADEON_ISYNC_CPSCRATCH_IDLEGUI); 457 RADEON_PURGE_CACHE(); 458 RADEON_PURGE_ZCACHE(); 459 RADEON_WAIT_UNTIL_IDLE(); 460 ADVANCE_RING(); 461 COMMIT_RING(); 462 463 dev_priv->track_flush |= RADEON_FLUSH_EMITED | RADEON_PURGE_EMITED; 464} 465 466/* Reset the Command Processor. This will not flush any pending 467 * commands, so you must wait for the CP command stream to complete 468 * before calling this routine. 469 */ 470static void radeon_do_cp_reset(drm_radeon_private_t * dev_priv) 471{ 472 u32 cur_read_ptr; 473 DRM_DEBUG("\n"); 474 475 cur_read_ptr = RADEON_READ(RADEON_CP_RB_RPTR); 476 RADEON_WRITE(RADEON_CP_RB_WPTR, cur_read_ptr); 477 SET_RING_HEAD(dev_priv, cur_read_ptr); 478 dev_priv->ring.tail = cur_read_ptr; 479} 480 481/* Stop the Command Processor. This will not flush any pending 482 * commands, so you must flush the command stream and wait for the CP 483 * to go idle before calling this routine. 484 */ 485static void radeon_do_cp_stop(drm_radeon_private_t * dev_priv) 486{ 487 DRM_DEBUG("\n"); 488 489 RADEON_WRITE(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIDIS_INDDIS); 490 491 dev_priv->cp_running = 0; 492} 493 494/* Reset the engine. This will stop the CP if it is running. 
495 */ 496static int radeon_do_engine_reset(struct drm_device * dev) 497{ 498 drm_radeon_private_t *dev_priv = dev->dev_private; 499 u32 clock_cntl_index = 0, mclk_cntl = 0, rbbm_soft_reset; 500 DRM_DEBUG("\n"); 501 502 radeon_do_pixcache_flush(dev_priv); 503 504 if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV410) { 505 /* may need something similar for newer chips */ 506 clock_cntl_index = RADEON_READ(RADEON_CLOCK_CNTL_INDEX); 507 mclk_cntl = RADEON_READ_PLL(dev, RADEON_MCLK_CNTL); 508 509 RADEON_WRITE_PLL(RADEON_MCLK_CNTL, (mclk_cntl | 510 RADEON_FORCEON_MCLKA | 511 RADEON_FORCEON_MCLKB | 512 RADEON_FORCEON_YCLKA | 513 RADEON_FORCEON_YCLKB | 514 RADEON_FORCEON_MC | 515 RADEON_FORCEON_AIC)); 516 } 517 518 rbbm_soft_reset = RADEON_READ(RADEON_RBBM_SOFT_RESET); 519 520 RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset | 521 RADEON_SOFT_RESET_CP | 522 RADEON_SOFT_RESET_HI | 523 RADEON_SOFT_RESET_SE | 524 RADEON_SOFT_RESET_RE | 525 RADEON_SOFT_RESET_PP | 526 RADEON_SOFT_RESET_E2 | 527 RADEON_SOFT_RESET_RB)); 528 RADEON_READ(RADEON_RBBM_SOFT_RESET); 529 RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset & 530 ~(RADEON_SOFT_RESET_CP | 531 RADEON_SOFT_RESET_HI | 532 RADEON_SOFT_RESET_SE | 533 RADEON_SOFT_RESET_RE | 534 RADEON_SOFT_RESET_PP | 535 RADEON_SOFT_RESET_E2 | 536 RADEON_SOFT_RESET_RB))); 537 RADEON_READ(RADEON_RBBM_SOFT_RESET); 538 539 if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV410) { 540 RADEON_WRITE_PLL(RADEON_MCLK_CNTL, mclk_cntl); 541 RADEON_WRITE(RADEON_CLOCK_CNTL_INDEX, clock_cntl_index); 542 RADEON_WRITE(RADEON_RBBM_SOFT_RESET, rbbm_soft_reset); 543 } 544 545 /* setup the raster pipes */ 546 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R300) 547 radeon_init_pipes(dev_priv); 548 549 /* Reset the CP ring */ 550 radeon_do_cp_reset(dev_priv); 551 552 /* The CP is no longer running after an engine reset */ 553 dev_priv->cp_running = 0; 554 555 /* Reset any pending vertex, indirect buffers */ 556 radeon_freelist_reset(dev); 557 558 
return 0; 559} 560 561static void radeon_cp_init_ring_buffer(struct drm_device * dev, 562 drm_radeon_private_t * dev_priv) 563{ 564 u32 ring_start, cur_read_ptr; 565 u32 tmp; 566 567 /* Initialize the memory controller. With new memory map, the fb location 568 * is not changed, it should have been properly initialized already. Part 569 * of the problem is that the code below is bogus, assuming the GART is 570 * always appended to the fb which is not necessarily the case 571 */ 572 if (!dev_priv->new_memmap) 573 radeon_write_fb_location(dev_priv, 574 ((dev_priv->gart_vm_start - 1) & 0xffff0000) 575 | (dev_priv->fb_location >> 16)); 576 577#if __OS_HAS_AGP 578 if (dev_priv->flags & RADEON_IS_AGP) { 579 radeon_write_agp_base(dev_priv, dev->agp->base); 580 581 radeon_write_agp_location(dev_priv, 582 (((dev_priv->gart_vm_start - 1 + 583 dev_priv->gart_size) & 0xffff0000) | 584 (dev_priv->gart_vm_start >> 16))); 585 586 ring_start = (dev_priv->cp_ring->offset 587 - dev->agp->base 588 + dev_priv->gart_vm_start); 589 } else 590#endif 591 ring_start = (dev_priv->cp_ring->offset 592 - (unsigned long)dev->sg->virtual 593 + dev_priv->gart_vm_start); 594 595 RADEON_WRITE(RADEON_CP_RB_BASE, ring_start); 596 597 /* Set the write pointer delay */ 598 RADEON_WRITE(RADEON_CP_RB_WPTR_DELAY, 0); 599 600 /* Initialize the ring buffer's read and write pointers */ 601 cur_read_ptr = RADEON_READ(RADEON_CP_RB_RPTR); 602 RADEON_WRITE(RADEON_CP_RB_WPTR, cur_read_ptr); 603 SET_RING_HEAD(dev_priv, cur_read_ptr); 604 dev_priv->ring.tail = cur_read_ptr; 605 606#if __OS_HAS_AGP 607 if (dev_priv->flags & RADEON_IS_AGP) { 608 RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR, 609 dev_priv->ring_rptr->offset 610 - dev->agp->base + dev_priv->gart_vm_start); 611 } else 612#endif 613 { 614 struct drm_sg_mem *entry = dev->sg; 615 unsigned long tmp_ofs, page_ofs; 616 617 tmp_ofs = dev_priv->ring_rptr->offset - 618 (unsigned long)dev->sg->virtual; 619 page_ofs = tmp_ofs >> PAGE_SHIFT; 620 621 
RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR, entry->busaddr[page_ofs]); 622 DRM_DEBUG("ring rptr: offset=0x%08lx handle=0x%08lx\n", 623 (unsigned long)entry->busaddr[page_ofs], 624 entry->handle + tmp_ofs); 625 } 626 627 /* Set ring buffer size */ 628#ifdef __BIG_ENDIAN 629 RADEON_WRITE(RADEON_CP_RB_CNTL, 630 RADEON_BUF_SWAP_32BIT | 631 (dev_priv->ring.fetch_size_l2ow << 18) | 632 (dev_priv->ring.rptr_update_l2qw << 8) | 633 dev_priv->ring.size_l2qw); 634#else 635 RADEON_WRITE(RADEON_CP_RB_CNTL, 636 (dev_priv->ring.fetch_size_l2ow << 18) | 637 (dev_priv->ring.rptr_update_l2qw << 8) | 638 dev_priv->ring.size_l2qw); 639#endif 640 641 /* Initialize the scratch register pointer. This will cause 642 * the scratch register values to be written out to memory 643 * whenever they are updated. 644 * 645 * We simply put this behind the ring read pointer, this works 646 * with PCI GART as well as (whatever kind of) AGP GART 647 */ 648 RADEON_WRITE(RADEON_SCRATCH_ADDR, RADEON_READ(RADEON_CP_RB_RPTR_ADDR) 649 + RADEON_SCRATCH_REG_OFFSET); 650 651 dev_priv->scratch = ((__volatile__ u32 *) 652 dev_priv->ring_rptr->handle + 653 (RADEON_SCRATCH_REG_OFFSET / sizeof(u32))); 654 655 RADEON_WRITE(RADEON_SCRATCH_UMSK, 0x7); 656 657 /* Turn on bus mastering */ 658 if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || 659 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) { 660 /* rs600/rs690/rs740 */ 661 tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS; 662 RADEON_WRITE(RADEON_BUS_CNTL, tmp); 663 } else if (((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV350) || 664 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R420) || 665 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) || 666 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) { 667 /* r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */ 668 tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS; 669 RADEON_WRITE(RADEON_BUS_CNTL, tmp); 670 } /* PCIE cards appears to not need this */ 671 672 
dev_priv->sarea_priv->last_frame = dev_priv->scratch[0] = 0; 673 RADEON_WRITE(RADEON_LAST_FRAME_REG, dev_priv->sarea_priv->last_frame); 674 675 dev_priv->sarea_priv->last_dispatch = dev_priv->scratch[1] = 0; 676 RADEON_WRITE(RADEON_LAST_DISPATCH_REG, 677 dev_priv->sarea_priv->last_dispatch); 678 679 dev_priv->sarea_priv->last_clear = dev_priv->scratch[2] = 0; 680 RADEON_WRITE(RADEON_LAST_CLEAR_REG, dev_priv->sarea_priv->last_clear); 681 682 radeon_do_wait_for_idle(dev_priv); 683 684 /* Sync everything up */ 685 RADEON_WRITE(RADEON_ISYNC_CNTL, 686 (RADEON_ISYNC_ANY2D_IDLE3D | 687 RADEON_ISYNC_ANY3D_IDLE2D | 688 RADEON_ISYNC_WAIT_IDLEGUI | 689 RADEON_ISYNC_CPSCRATCH_IDLEGUI)); 690 691} 692 693static void radeon_test_writeback(drm_radeon_private_t * dev_priv) 694{ 695 u32 tmp; 696 697 /* Writeback doesn't seem to work everywhere, test it here and possibly 698 * enable it if it appears to work 699 */ 700 DRM_WRITE32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(1), 0); 701 RADEON_WRITE(RADEON_SCRATCH_REG1, 0xdeadbeef); 702 703 for (tmp = 0; tmp < dev_priv->usec_timeout; tmp++) { 704 if (DRM_READ32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(1)) == 705 0xdeadbeef) 706 break; 707 DRM_UDELAY(1); 708 } 709 710 if (tmp < dev_priv->usec_timeout) { 711 dev_priv->writeback_works = 1; 712 DRM_INFO("writeback test succeeded in %d usecs\n", tmp); 713 } else { 714 dev_priv->writeback_works = 0; 715 DRM_INFO("writeback test failed\n"); 716 } 717 if (radeon_no_wb == 1) { 718 dev_priv->writeback_works = 0; 719 DRM_INFO("writeback forced off\n"); 720 } 721 722 if (!dev_priv->writeback_works) { 723 /* Disable writeback to avoid unnecessary bus master transfers */ 724 RADEON_WRITE(RADEON_CP_RB_CNTL, RADEON_READ(RADEON_CP_RB_CNTL) | RADEON_RB_NO_UPDATE); 725 RADEON_WRITE(RADEON_SCRATCH_UMSK, 0); 726 } 727} 728 729/* Enable or disable IGP GART on the chip */ 730static void radeon_set_igpgart(drm_radeon_private_t * dev_priv, int on) 731{ 732 u32 temp; 733 734 if (on) { 735 DRM_DEBUG("programming igp 
gart %08X %08lX %08X\n", 736 dev_priv->gart_vm_start, 737 (long)dev_priv->gart_info.bus_addr, 738 dev_priv->gart_size); 739 740 temp = IGP_READ_MCIND(dev_priv, RS480_MC_MISC_CNTL); 741 742 if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || 743 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) 744 IGP_WRITE_MCIND(RS480_MC_MISC_CNTL, (RS480_GART_INDEX_REG_EN | 745 RS690_BLOCK_GFX_D3_EN)); 746 else 747 IGP_WRITE_MCIND(RS480_MC_MISC_CNTL, RS480_GART_INDEX_REG_EN); 748 749 IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | 750 RS480_VA_SIZE_32MB)); 751 752 temp = IGP_READ_MCIND(dev_priv, RS480_GART_FEATURE_ID); 753 IGP_WRITE_MCIND(RS480_GART_FEATURE_ID, (RS480_HANG_EN | 754 RS480_TLB_ENABLE | 755 RS480_GTW_LAC_EN | 756 RS480_1LEVEL_GART)); 757 758 temp = dev_priv->gart_info.bus_addr & 0xfffff000; 759 temp |= (upper_32_bits(dev_priv->gart_info.bus_addr) & 0xff) << 4; 760 IGP_WRITE_MCIND(RS480_GART_BASE, temp); 761 762 temp = IGP_READ_MCIND(dev_priv, RS480_AGP_MODE_CNTL); 763 IGP_WRITE_MCIND(RS480_AGP_MODE_CNTL, ((1 << RS480_REQ_TYPE_SNOOP_SHIFT) | 764 RS480_REQ_TYPE_SNOOP_DIS)); 765 766 radeon_write_agp_base(dev_priv, dev_priv->gart_vm_start); 767 768 dev_priv->gart_size = 32*1024*1024; 769 temp = (((dev_priv->gart_vm_start - 1 + dev_priv->gart_size) & 770 0xffff0000) | (dev_priv->gart_vm_start >> 16)); 771 772 radeon_write_agp_location(dev_priv, temp); 773 774 temp = IGP_READ_MCIND(dev_priv, RS480_AGP_ADDRESS_SPACE_SIZE); 775 IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | 776 RS480_VA_SIZE_32MB)); 777 778 do { 779 temp = IGP_READ_MCIND(dev_priv, RS480_GART_CACHE_CNTRL); 780 if ((temp & RS480_GART_CACHE_INVALIDATE) == 0) 781 break; 782 DRM_UDELAY(1); 783 } while(1); 784 785 IGP_WRITE_MCIND(RS480_GART_CACHE_CNTRL, 786 RS480_GART_CACHE_INVALIDATE); 787 788 do { 789 temp = IGP_READ_MCIND(dev_priv, RS480_GART_CACHE_CNTRL); 790 if ((temp & RS480_GART_CACHE_INVALIDATE) == 0) 791 break; 792 DRM_UDELAY(1); 793 } while(1); 794 795 
IGP_WRITE_MCIND(RS480_GART_CACHE_CNTRL, 0); 796 } else { 797 IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, 0); 798 } 799} 800 801static void radeon_set_pciegart(drm_radeon_private_t * dev_priv, int on) 802{ 803 u32 tmp = RADEON_READ_PCIE(dev_priv, RADEON_PCIE_TX_GART_CNTL); 804 if (on) { 805 806 DRM_DEBUG("programming pcie %08X %08lX %08X\n", 807 dev_priv->gart_vm_start, 808 (long)dev_priv->gart_info.bus_addr, 809 dev_priv->gart_size); 810 RADEON_WRITE_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, 811 dev_priv->gart_vm_start); 812 RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_BASE, 813 dev_priv->gart_info.bus_addr); 814 RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_START_LO, 815 dev_priv->gart_vm_start); 816 RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_END_LO, 817 dev_priv->gart_vm_start + 818 dev_priv->gart_size - 1); 819 820 radeon_write_agp_location(dev_priv, 0xffffffc0); /* ?? */ 821 822 RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL, 823 RADEON_PCIE_TX_GART_EN); 824 } else { 825 RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL, 826 tmp & ~RADEON_PCIE_TX_GART_EN); 827 } 828} 829 830/* Enable or disable PCI GART on the chip */ 831static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on) 832{ 833 u32 tmp; 834 835 if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || 836 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740) || 837 (dev_priv->flags & RADEON_IS_IGPGART)) { 838 radeon_set_igpgart(dev_priv, on); 839 return; 840 } 841 842 if (dev_priv->flags & RADEON_IS_PCIE) { 843 radeon_set_pciegart(dev_priv, on); 844 return; 845 } 846 847 tmp = RADEON_READ(RADEON_AIC_CNTL); 848 849 if (on) { 850 RADEON_WRITE(RADEON_AIC_CNTL, 851 tmp | RADEON_PCIGART_TRANSLATE_EN); 852 853 /* set PCI GART page-table base address 854 */ 855 RADEON_WRITE(RADEON_AIC_PT_BASE, dev_priv->gart_info.bus_addr); 856 857 /* set address range for PCI address translate 858 */ 859 RADEON_WRITE(RADEON_AIC_LO_ADDR, dev_priv->gart_vm_start); 860 RADEON_WRITE(RADEON_AIC_HI_ADDR, dev_priv->gart_vm_start 861 + 
					   dev_priv->gart_size - 1);

		/* Turn off AGP aperture -- is this required for PCI GART?
		 */
		radeon_write_agp_location(dev_priv, 0xffffffc0);
		RADEON_WRITE(RADEON_AGP_COMMAND, 0);	/* clear AGP_COMMAND */
	} else {
		RADEON_WRITE(RADEON_AIC_CNTL,
			     tmp & ~RADEON_PCIGART_TRANSLATE_EN);
	}
}

/* One-time CP (command processor) initialisation, driven by the userspace
 * parameters in 'init' (DRM_RADEON_CP_INIT ioctl).  Validates the request,
 * resolves the DRM map handles, computes the framebuffer/GART address
 * layout, programs the GART, loads the CP microcode and brings up the ring.
 *
 * Returns 0 on success or a negative errno.  Every failure path calls
 * radeon_do_cleanup_cp() so partial state is torn down before returning.
 */
static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;

	DRM_DEBUG("\n");

	/* if we require new memory map but we don't have it fail */
	if ((dev_priv->flags & RADEON_NEW_MEMMAP) && !dev_priv->new_memmap) {
		DRM_ERROR("Cannot initialise DRM on this card\nThis card requires a new X.org DDX for 3D\n");
		radeon_do_cleanup_cp(dev);
		return -EINVAL;
	}

	/* Userspace may force an AGP-capable card into PCI mode; conversely,
	 * restore the AGP flag if no bus type is flagged and the client did
	 * not ask for PCI.
	 */
	if (init->is_pci && (dev_priv->flags & RADEON_IS_AGP))
	{
		DRM_DEBUG("Forcing AGP card to PCI mode\n");
		dev_priv->flags &= ~RADEON_IS_AGP;
	}
	else if (!(dev_priv->flags & (RADEON_IS_AGP | RADEON_IS_PCI | RADEON_IS_PCIE))
		 && !init->is_pci)
	{
		DRM_DEBUG("Restoring AGP flag\n");
		dev_priv->flags |= RADEON_IS_AGP;
	}

	/* Non-AGP operation needs the scatter/gather memory set up first. */
	if ((!(dev_priv->flags & RADEON_IS_AGP)) && !dev->sg) {
		DRM_ERROR("PCI GART memory not allocated!\n");
		radeon_do_cleanup_cp(dev);
		return -EINVAL;
	}

	dev_priv->usec_timeout = init->usec_timeout;
	if (dev_priv->usec_timeout < 1 ||
	    dev_priv->usec_timeout > RADEON_MAX_USEC_TIMEOUT) {
		DRM_DEBUG("TIMEOUT problem!\n");
		radeon_do_cleanup_cp(dev);
		return -EINVAL;
	}

	/* Enable vblank on CRTC1 for older X servers
	 */
	dev_priv->vblank_crtc = DRM_RADEON_VBLANK_CRTC1;

	dev_priv->do_boxes = 0;
	dev_priv->cp_mode = init->cp_mode;

	/* We don't support anything other than bus-mastering ring mode,
	 * but the ring can be in either AGP or PCI space for the ring
	 * read pointer.
	 */
	if ((init->cp_mode != RADEON_CSQ_PRIBM_INDDIS) &&
	    (init->cp_mode != RADEON_CSQ_PRIBM_INDBM)) {
		DRM_DEBUG("BAD cp_mode (%x)!\n", init->cp_mode);
		radeon_do_cleanup_cp(dev);
		return -EINVAL;
	}

	/* Unknown depths fall through to 32bpp formats. */
	switch (init->fb_bpp) {
	case 16:
		dev_priv->color_fmt = RADEON_COLOR_FORMAT_RGB565;
		break;
	case 32:
	default:
		dev_priv->color_fmt = RADEON_COLOR_FORMAT_ARGB8888;
		break;
	}
	dev_priv->front_offset = init->front_offset;
	dev_priv->front_pitch = init->front_pitch;
	dev_priv->back_offset = init->back_offset;
	dev_priv->back_pitch = init->back_pitch;

	switch (init->depth_bpp) {
	case 16:
		dev_priv->depth_fmt = RADEON_DEPTH_FORMAT_16BIT_INT_Z;
		break;
	case 32:
	default:
		dev_priv->depth_fmt = RADEON_DEPTH_FORMAT_24BIT_INT_Z;
		break;
	}
	dev_priv->depth_offset = init->depth_offset;
	dev_priv->depth_pitch = init->depth_pitch;

	/* Hardware state for depth clears.  Remove this if/when we no
	 * longer clear the depth buffer with a 3D rectangle.  Hard-code
	 * all values to prevent unwanted 3D state from slipping through
	 * and screwing with the clear operation.
	 */
	dev_priv->depth_clear.rb3d_cntl = (RADEON_PLANE_MASK_ENABLE |
					   (dev_priv->color_fmt << 10) |
					   (dev_priv->chip_family < CHIP_R200 ?
					    RADEON_ZBLOCK16 : 0));

	dev_priv->depth_clear.rb3d_zstencilcntl =
	    (dev_priv->depth_fmt |
	     RADEON_Z_TEST_ALWAYS |
	     RADEON_STENCIL_TEST_ALWAYS |
	     RADEON_STENCIL_S_FAIL_REPLACE |
	     RADEON_STENCIL_ZPASS_REPLACE |
	     RADEON_STENCIL_ZFAIL_REPLACE | RADEON_Z_WRITE_ENABLE);

	dev_priv->depth_clear.se_cntl = (RADEON_FFACE_CULL_CW |
					 RADEON_BFACE_SOLID |
					 RADEON_FFACE_SOLID |
					 RADEON_FLAT_SHADE_VTX_LAST |
					 RADEON_DIFFUSE_SHADE_FLAT |
					 RADEON_ALPHA_SHADE_FLAT |
					 RADEON_SPECULAR_SHADE_FLAT |
					 RADEON_FOG_SHADE_FLAT |
					 RADEON_VTX_PIX_CENTER_OGL |
					 RADEON_ROUND_MODE_TRUNC |
					 RADEON_ROUND_PREC_8TH_PIX);

	dev_priv->ring_offset = init->ring_offset;
	dev_priv->ring_rptr_offset = init->ring_rptr_offset;
	dev_priv->buffers_offset = init->buffers_offset;
	dev_priv->gart_textures_offset = init->gart_textures_offset;

	/* Resolve the DRM maps that userspace created via drm_addmap. */
	dev_priv->sarea = drm_getsarea(dev);
	if (!dev_priv->sarea) {
		DRM_ERROR("could not find sarea!\n");
		radeon_do_cleanup_cp(dev);
		return -EINVAL;
	}

	dev_priv->cp_ring = drm_core_findmap(dev, init->ring_offset);
	if (!dev_priv->cp_ring) {
		DRM_ERROR("could not find cp ring region!\n");
		radeon_do_cleanup_cp(dev);
		return -EINVAL;
	}
	dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset);
	if (!dev_priv->ring_rptr) {
		DRM_ERROR("could not find ring read pointer!\n");
		radeon_do_cleanup_cp(dev);
		return -EINVAL;
	}
	dev->agp_buffer_token = init->buffers_offset;
	dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
	if (!dev->agp_buffer_map) {
		DRM_ERROR("could not find dma buffer region!\n");
		radeon_do_cleanup_cp(dev);
		return -EINVAL;
	}

	/* GART texture map is optional (offset 0 means "not used"). */
	if (init->gart_textures_offset) {
		dev_priv->gart_textures =
		    drm_core_findmap(dev, init->gart_textures_offset);
		if (!dev_priv->gart_textures) {
			DRM_ERROR("could not find GART texture region!\n");
			radeon_do_cleanup_cp(dev);
			return -EINVAL;
		}
	}

	dev_priv->sarea_priv =
	    (drm_radeon_sarea_t *) ((u8 *) dev_priv->sarea->handle +
				    init->sarea_priv_offset);

#if __OS_HAS_AGP
	if (dev_priv->flags & RADEON_IS_AGP) {
		/* AGP maps must be ioremapped to get CPU-visible handles. */
		drm_core_ioremap(dev_priv->cp_ring, dev);
		drm_core_ioremap(dev_priv->ring_rptr, dev);
		drm_core_ioremap(dev->agp_buffer_map, dev);
		if (!dev_priv->cp_ring->handle ||
		    !dev_priv->ring_rptr->handle ||
		    !dev->agp_buffer_map->handle) {
			DRM_ERROR("could not find ioremap agp regions!\n");
			radeon_do_cleanup_cp(dev);
			return -EINVAL;
		}
	} else
#endif
	{
		/* Non-AGP: the map offsets are already CPU-addressable. */
		dev_priv->cp_ring->handle = (void *)dev_priv->cp_ring->offset;
		dev_priv->ring_rptr->handle =
		    (void *)dev_priv->ring_rptr->offset;
		dev->agp_buffer_map->handle =
		    (void *)dev->agp_buffer_map->offset;

		DRM_DEBUG("dev_priv->cp_ring->handle %p\n",
			  dev_priv->cp_ring->handle);
		DRM_DEBUG("dev_priv->ring_rptr->handle %p\n",
			  dev_priv->ring_rptr->handle);
		DRM_DEBUG("dev->agp_buffer_map->handle %p\n",
			  dev->agp_buffer_map->handle);
	}

	/* MC_FB_LOCATION packs start/end in 16-bit halves of the register;
	 * decode into a byte address and size.
	 */
	dev_priv->fb_location = (radeon_read_fb_location(dev_priv) & 0xffff) << 16;
	dev_priv->fb_size =
		((radeon_read_fb_location(dev_priv) & 0xffff0000u) + 0x10000)
		- dev_priv->fb_location;

	/* pitch/offset registers: pitch in units of 64 in bits 22+,
	 * offset in units of 1KB in the low bits.
	 */
	dev_priv->front_pitch_offset = (((dev_priv->front_pitch / 64) << 22) |
					((dev_priv->front_offset
					  + dev_priv->fb_location) >> 10));

	dev_priv->back_pitch_offset = (((dev_priv->back_pitch / 64) << 22) |
				       ((dev_priv->back_offset
					 + dev_priv->fb_location) >> 10));

	dev_priv->depth_pitch_offset = (((dev_priv->depth_pitch / 64) << 22) |
					((dev_priv->depth_offset
					  + dev_priv->fb_location) >> 10));

	dev_priv->gart_size = init->gart_size;

	/* New let's set the memory map ...
	 */
	if (dev_priv->new_memmap) {
		u32 base = 0;

		DRM_INFO("Setting GART location based on new memory map\n");

		/* If using AGP, try to locate the AGP aperture at the same
		 * location in the card and on the bus, though we have to
		 * align it down.
		 */
#if __OS_HAS_AGP
		if (dev_priv->flags & RADEON_IS_AGP) {
			base = dev->agp->base;
			/* Check if valid */
			if ((base + dev_priv->gart_size - 1) >= dev_priv->fb_location &&
			    base < (dev_priv->fb_location + dev_priv->fb_size - 1)) {
				DRM_INFO("Can't use AGP base @0x%08lx, won't fit\n",
					 dev->agp->base);
				base = 0;
			}
		}
#endif
		/* If not or if AGP is at 0 (Macs), try to put it elsewhere */
		if (base == 0) {
			base = dev_priv->fb_location + dev_priv->fb_size;
			/* If placing above FB would wrap the 32-bit space,
			 * fall back to placing the GART below the FB.
			 */
			if (base < dev_priv->fb_location ||
			    ((base + dev_priv->gart_size) & 0xfffffffful) < base)
				base = dev_priv->fb_location
					- dev_priv->gart_size;
		}
		/* GART base must be 4MB aligned. */
		dev_priv->gart_vm_start = base & 0xffc00000u;
		if (dev_priv->gart_vm_start != base)
			DRM_INFO("GART aligned down from 0x%08x to 0x%08x\n",
				 base, dev_priv->gart_vm_start);
	} else {
		DRM_INFO("Setting GART location based on old memory map\n");
		dev_priv->gart_vm_start = dev_priv->fb_location +
			RADEON_READ(RADEON_CONFIG_APER_SIZE);
	}

#if __OS_HAS_AGP
	if (dev_priv->flags & RADEON_IS_AGP)
		dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset
						 - dev->agp->base
						 + dev_priv->gart_vm_start);
	else
#endif
		dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset
						 - (unsigned long)dev->sg->virtual
						 + dev_priv->gart_vm_start);

	DRM_DEBUG("dev_priv->gart_size %d\n", dev_priv->gart_size);
	DRM_DEBUG("dev_priv->gart_vm_start 0x%x\n", dev_priv->gart_vm_start);
	DRM_DEBUG("dev_priv->gart_buffers_offset 0x%lx\n",
		  dev_priv->gart_buffers_offset);

	dev_priv->ring.start = (u32 *) dev_priv->cp_ring->handle;
	dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle
			      + init->ring_size / sizeof(u32));
	dev_priv->ring.size = init->ring_size;
	dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8);

	/* rptr_update / fetch_size come hard-coded rather than from 'init';
	 * the commented-out expressions record the intended source.
	 */
	dev_priv->ring.rptr_update = /* init->rptr_update */ 4096;
	dev_priv->ring.rptr_update_l2qw = drm_order( /* init->rptr_update */ 4096 / 8);

	dev_priv->ring.fetch_size = /* init->fetch_size */ 32;
	dev_priv->ring.fetch_size_l2ow = drm_order( /* init->fetch_size */ 32 / 16);

	dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;

	dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK;

#if __OS_HAS_AGP
	if (dev_priv->flags & RADEON_IS_AGP) {
		/* Turn off PCI GART */
		radeon_set_pcigart(dev_priv, 0);
	} else
#endif
	{
		dev_priv->gart_info.table_mask = DMA_BIT_MASK(32);
		/* if we have an offset set from userspace */
		if (dev_priv->pcigart_offset_set) {
			/* GART table lives in framebuffer memory. */
			dev_priv->gart_info.bus_addr =
			    dev_priv->pcigart_offset + dev_priv->fb_location;
			dev_priv->gart_info.mapping.offset =
			    dev_priv->pcigart_offset + dev_priv->fb_aper_offset;
			dev_priv->gart_info.mapping.size =
			    dev_priv->gart_info.table_size;

			drm_core_ioremap_wc(&dev_priv->gart_info.mapping, dev);
			dev_priv->gart_info.addr =
			    dev_priv->gart_info.mapping.handle;

			if (dev_priv->flags & RADEON_IS_PCIE)
				dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCIE;
			else
				dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI;
			dev_priv->gart_info.gart_table_location =
			    DRM_ATI_GART_FB;

			DRM_DEBUG("Setting phys_pci_gart to %p %08lX\n",
				  dev_priv->gart_info.addr,
				  dev_priv->pcigart_offset);
		} else {
			/* GART table in system (main) memory; PCIE parts
			 * require the table in FB memory, so reject them.
			 */
			if (dev_priv->flags & RADEON_IS_IGPGART)
				dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_IGP;
			else
				dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI;
			dev_priv->gart_info.gart_table_location =
			    DRM_ATI_GART_MAIN;
			dev_priv->gart_info.addr = NULL;
			dev_priv->gart_info.bus_addr = 0;
			if (dev_priv->flags & RADEON_IS_PCIE) {
				DRM_ERROR
				    ("Cannot use PCI Express without GART in FB memory\n");
				radeon_do_cleanup_cp(dev);
				return -EINVAL;
			}
		}

		if (!drm_ati_pcigart_init(dev, &dev_priv->gart_info)) {
			DRM_ERROR("failed to init PCI GART!\n");
			radeon_do_cleanup_cp(dev);
			return -ENOMEM;
		}

		/* Turn on PCI GART */
		radeon_set_pcigart(dev_priv, 1);
	}

	/* Start with assuming that writeback doesn't work */
	dev_priv->writeback_works = 0;

	radeon_cp_load_microcode(dev_priv);
	radeon_cp_init_ring_buffer(dev, dev_priv);

	dev_priv->last_buf = 0;

	radeon_do_engine_reset(dev);
	radeon_test_writeback(dev_priv);

	return 0;
}

/* Undo radeon_do_init_cp().  Safe to call on a partially initialised
 * device: each teardown step checks that its resource exists first.
 * Always returns 0.
 */
static int radeon_do_cleanup_cp(struct drm_device * dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	DRM_DEBUG("\n");

	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

#if __OS_HAS_AGP
	if (dev_priv->flags & RADEON_IS_AGP) {
		if (dev_priv->cp_ring != NULL) {
			drm_core_ioremapfree(dev_priv->cp_ring, dev);
			dev_priv->cp_ring = NULL;
		}
		if (dev_priv->ring_rptr != NULL) {
			drm_core_ioremapfree(dev_priv->ring_rptr, dev);
			dev_priv->ring_rptr = NULL;
		}
		if (dev->agp_buffer_map != NULL) {
			drm_core_ioremapfree(dev->agp_buffer_map, dev);
			dev->agp_buffer_map = NULL;
		}
	} else
#endif
	{

		if (dev_priv->gart_info.bus_addr) {
			/* Turn off PCI GART */
			radeon_set_pcigart(dev_priv, 0);
			if (!drm_ati_pcigart_cleanup(dev, &dev_priv->gart_info))
				DRM_ERROR("failed to cleanup PCI GART!\n");
		}

		if (dev_priv->gart_info.gart_table_location == DRM_ATI_GART_FB)
		{
			drm_core_ioremapfree(&dev_priv->gart_info.mapping, dev);
			dev_priv->gart_info.addr = 0;
		}
	}
	/* only clear to the start of flags */
	memset(dev_priv, 0, offsetof(drm_radeon_private_t, flags));

	return 0;
}

/* This code will reinit the Radeon CP hardware after a resume from disc.
 * AFAIK, it would be very difficult to pickle the state at suspend time, so
 * here we make sure that all Radeon hardware initialisation is re-done without
 * affecting running applications.
 *
 * Charl P.
   Botha <http://cpbotha.net>
 */
/* Re-run the hardware side of CP initialisation after resume: re-program
 * the GART, reload microcode, re-init the ring and reset the engine.
 * Relies on dev_priv still holding the pre-suspend software state.
 */
static int radeon_do_resume_cp(struct drm_device * dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;

	if (!dev_priv) {
		DRM_ERROR("Called with no initialization\n");
		return -EINVAL;
	}

	DRM_DEBUG("Starting radeon_do_resume_cp()\n");

#if __OS_HAS_AGP
	if (dev_priv->flags & RADEON_IS_AGP) {
		/* Turn off PCI GART */
		radeon_set_pcigart(dev_priv, 0);
	} else
#endif
	{
		/* Turn on PCI GART */
		radeon_set_pcigart(dev_priv, 1);
	}

	radeon_cp_load_microcode(dev_priv);
	radeon_cp_init_ring_buffer(dev, dev_priv);

	radeon_do_engine_reset(dev);
	radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1);

	DRM_DEBUG("radeon_do_resume_cp() complete\n");

	return 0;
}

/* DRM_RADEON_CP_INIT ioctl: dispatch on init->func to either full CP
 * initialisation (per-chip-generation variants) or CP teardown.
 * R300-class init additionally primes the register flag tables.
 */
int radeon_cp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_radeon_init_t *init = data;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (init->func == RADEON_INIT_R300_CP)
		r300_init_reg_flags(dev);

	switch (init->func) {
	case RADEON_INIT_CP:
	case RADEON_INIT_R200_CP:
	case RADEON_INIT_R300_CP:
		return radeon_do_init_cp(dev, init);
	case RADEON_CLEANUP_CP:
		return radeon_do_cleanup_cp(dev);
	}

	return -EINVAL;
}

/* DRM_RADEON_CP_START ioctl: start the CP if it is initialised and not
 * already running.  Bogus CP mode is tolerated with a debug message
 * (returns 0) rather than treated as an error.
 */
int radeon_cp_start(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (dev_priv->cp_running) {
		DRM_DEBUG("while CP running\n");
		return 0;
	}
	if (dev_priv->cp_mode == RADEON_CSQ_PRIDIS_INDDIS) {
		DRM_DEBUG("called with bogus CP mode (%d)\n",
			  dev_priv->cp_mode);
		return 0;
	}

	radeon_do_cp_start(dev_priv);

	return 0;
}

/* Stop the CP.  The engine must have been idled before calling this
 * routine.
 */
int radeon_cp_stop(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_cp_stop_t *stop = data;
	int ret;
	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (!dev_priv->cp_running)
		return 0;

	/* Flush any pending CP commands.  This ensures any outstanding
	 * commands are exectuted by the engine before we turn it off.
	 */
	if (stop->flush) {
		radeon_do_cp_flush(dev_priv);
	}

	/* If we fail to make the engine go idle, we return an error
	 * code so that the DRM ioctl wrapper can try again.
	 */
	if (stop->idle) {
		ret = radeon_do_cp_idle(dev_priv);
		if (ret)
			return ret;
	}

	/* Finally, we can turn off the CP.  If the engine isn't idle,
	 * we will get some dropped triangles as they won't be fully
	 * rendered before the CP is shut down.
	 */
	radeon_do_cp_stop(dev_priv);

	/* Reset the engine */
	radeon_do_engine_reset(dev);

	return 0;
}

/* Final device release: wait for the CP to idle (sleeping between
 * polls), stop and reset the engine, mask all interrupts, clear the
 * surface registers, tear down the memory heaps and free CP state.
 */
void radeon_do_release(struct drm_device * dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	int i, ret;

	if (dev_priv) {
		if (dev_priv->cp_running) {
			/* Stop the cp */
			while ((ret = radeon_do_cp_idle(dev_priv)) != 0) {
				DRM_DEBUG("radeon_do_cp_idle %d\n", ret);
#ifdef __linux__
				schedule();
#else
#if defined(__FreeBSD__) && __FreeBSD_version > 500000
				mtx_sleep(&ret, &dev->dev_lock, PZERO, "rdnrel",
					  1);
#else
				tsleep(&ret, PZERO, "rdnrel", 1);
#endif
#endif
			}
			radeon_do_cp_stop(dev_priv);
			radeon_do_engine_reset(dev);
		}

		/* Disable *all* interrupts */
		if (dev_priv->mmio)	/* remove this after permanent addmaps */
			RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);

		if (dev_priv->mmio) {	/* remove all surfaces */
			for (i = 0; i < RADEON_MAX_SURFACES; i++) {
				RADEON_WRITE(RADEON_SURFACE0_INFO + 16 * i, 0);
				RADEON_WRITE(RADEON_SURFACE0_LOWER_BOUND +
					     16 * i, 0);
				RADEON_WRITE(RADEON_SURFACE0_UPPER_BOUND +
					     16 * i, 0);
			}
		}

		/* Free memory heap structures */
		radeon_mem_takedown(&(dev_priv->gart_heap));
		radeon_mem_takedown(&(dev_priv->fb_heap));

		/* deallocate kernel resources */
		radeon_do_cleanup_cp(dev);
	}
}

/* Just reset the CP ring.  Called as part of an X Server engine reset.
 */
/* DRM_RADEON_CP_RESET ioctl: reset the CP ring and mark the CP as
 * stopped; the caller is expected to restart it afterwards.
 */
int radeon_cp_reset(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (!dev_priv) {
		DRM_DEBUG("called before init done\n");
		return -EINVAL;
	}

	radeon_do_cp_reset(dev_priv);

	/* The CP is no longer running after an engine reset */
	dev_priv->cp_running = 0;

	return 0;
}

/* DRM_RADEON_CP_IDLE ioctl: block until the engine is idle. */
int radeon_cp_idle(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	return radeon_do_cp_idle(dev_priv);
}

/* Added by Charl P. Botha to call radeon_do_resume_cp().
 */
int radeon_cp_resume(struct drm_device *dev, void *data, struct drm_file *file_priv)
{

	return radeon_do_resume_cp(dev);
}

/* DRM_RADEON_ENGINE_RESET ioctl: hard-reset the rendering engine. */
int radeon_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	return radeon_do_engine_reset(dev);
}

/* ================================================================
 * Fullscreen mode
 */

/* KW: Deprecated to say the least:
 */
int radeon_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	return 0;
}

/* ================================================================
 * Freelist management
 */

/* Original comment: FIXME: ROTATE_BUFS is a hack to cycle through
 * bufs until freelist code is used.  Note this hides a problem with
 * the scratch register * (used to keep track of last buffer
 * completed) being written to before * the last buffer has actually
 * completed rendering.
 *
 * KW: It's also a good way to find free buffers quickly.
 *
 * KW: Ideally this loop wouldn't exist, and freelist_get wouldn't
 * sleep.  However, bugs in older versions of radeon_accel.c mean that
 * we essentially have to do this, else old clients will break.
 *
 * However, it does leave open a potential deadlock where all the
 * buffers are held by other clients, which can't release them because
 * they can't get the lock.
 */

/* Rotate through the DMA buffer list, starting just after the last
 * buffer handed out, looking for one that is either unowned or whose
 * pending age has been passed by the CP (scratch register 1).  Busy-
 * waits up to usec_timeout iterations; returns NULL if nothing frees up.
 */
struct drm_buf *radeon_freelist_get(struct drm_device * dev)
{
	struct drm_device_dma *dma = dev->dma;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_buf_priv_t *buf_priv;
	struct drm_buf *buf;
	int i, t;
	int start;

	if (++dev_priv->last_buf >= dma->buf_count)
		dev_priv->last_buf = 0;

	start = dev_priv->last_buf;

	for (t = 0; t < dev_priv->usec_timeout; t++) {
		u32 done_age = GET_SCRATCH(1);
		DRM_DEBUG("done_age = %d\n", done_age);
		for (i = start; i < dma->buf_count; i++) {
			buf = dma->buflist[i];
			buf_priv = buf->dev_private;
			if (buf->file_priv == NULL || (buf->pending &&
						       buf_priv->age <=
						       done_age)) {
				dev_priv->stats.requested_bufs++;
				buf->pending = 0;
				return buf;
			}
			/* after the first pass, scan from the list head */
			start = 0;
		}

		if (t) {
			DRM_UDELAY(1);
			dev_priv->stats.freelist_loops++;
		}
	}

	DRM_DEBUG("returning NULL!\n");
	return NULL;
}

#if 0
/* Alternative (disabled) freelist_get: reads done_age once and makes at
 * most two passes over the buffer list instead of busy-waiting.
 */
struct drm_buf *radeon_freelist_get(struct drm_device * dev)
{
	struct drm_device_dma *dma = dev->dma;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_buf_priv_t *buf_priv;
	struct drm_buf *buf;
	int i, t;
	int start;
	u32 done_age = DRM_READ32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(1));

	if (++dev_priv->last_buf >= dma->buf_count)
		dev_priv->last_buf = 0;

	start = dev_priv->last_buf;
	dev_priv->stats.freelist_loops++;

	for (t = 0; t < 2; t++) {
		for (i = start; i < dma->buf_count; i++) {
			buf = dma->buflist[i];
			buf_priv = buf->dev_private;
			if (buf->file_priv == 0 || (buf->pending &&
						    buf_priv->age <=
						    done_age)) {
				dev_priv->stats.requested_bufs++;
				buf->pending = 0;
				return buf;
			}
		}
		start = 0;
	}

	return NULL;
}
#endif

/* Reset the freelist rotation point and zero every buffer's age. */
void radeon_freelist_reset(struct drm_device * dev)
{
	struct drm_device_dma *dma = dev->dma;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	int i;

	dev_priv->last_buf = 0;
	for (i = 0; i < dma->buf_count; i++) {
		struct drm_buf *buf = dma->buflist[i];
		drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
		buf_priv->age = 0;
	}
}

/* ================================================================
 * CP command submission
 */

/* Busy-wait until the ring has strictly more than 'n' bytes of free
 * space.  The timeout counter restarts whenever the hardware read
 * pointer advances, so it only expires on a genuinely stalled engine.
 * Returns 0 on success, -EBUSY on timeout.
 */
int radeon_wait_ring(drm_radeon_private_t * dev_priv, int n)
{
	drm_radeon_ring_buffer_t *ring = &dev_priv->ring;
	int i;
	u32 last_head = GET_RING_HEAD(dev_priv);

	for (i = 0; i < dev_priv->usec_timeout; i++) {
		u32 head = GET_RING_HEAD(dev_priv);

		ring->space = (head - ring->tail) * sizeof(u32);
		if (ring->space <= 0)
			ring->space += ring->size;
		if (ring->space > n)
			return 0;

		dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;

		/* head moved: engine is making progress, restart timeout */
		if (head != last_head)
			i = 0;
		last_head = head;

		DRM_UDELAY(1);
	}

	/* FIXME: This return value is ignored in the BEGIN_RING macro!
	 */
#if RADEON_FIFO_DEBUG
	radeon_status(dev_priv);
	DRM_ERROR("failed!\n");
#endif
	return -EBUSY;
}

/* Hand out up to d->request_count buffers from the freelist, copying
 * each buffer's index and size back to the userspace arrays.  Returns
 * -EBUSY if the freelist runs dry (broken client holding buffers).
 */
static int radeon_cp_get_buffers(struct drm_device *dev,
				 struct drm_file *file_priv,
				 struct drm_dma * d)
{
	int i;
	struct drm_buf *buf;

	for (i = d->granted_count; i < d->request_count; i++) {
		buf = radeon_freelist_get(dev);
		if (!buf)
			return -EBUSY;	/* NOTE: broken client */

		buf->file_priv = file_priv;

		if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx,
				     sizeof(buf->idx)))
			return -EFAULT;
		if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total,
				     sizeof(buf->total)))
			return -EFAULT;

		d->granted_count++;
	}
	return 0;
}

/* DRM_DMA ioctl: validate the request (no sends allowed, sane request
 * count) then grant DMA buffers via radeon_cp_get_buffers().
 */
int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	int ret = 0;
	struct drm_dma *d = data;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	/* Please don't send us buffers.
	 */
	if (d->send_count != 0) {
		DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
			  DRM_CURRENTPID, d->send_count);
		return -EINVAL;
	}

	/* We'll send you buffers.
	 */
	if (d->request_count < 0 || d->request_count > dma->buf_count) {
		DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
			  DRM_CURRENTPID, d->request_count, dma->buf_count);
		return -EINVAL;
	}

	d->granted_count = 0;

	if (d->request_count) {
		ret = radeon_cp_get_buffers(dev, file_priv, d);
	}

	return ret;
}

/* Driver load hook: allocate and zero the per-device private struct,
 * record the chip family and bus-type flags, and initialise vblank
 * support for both CRTCs.
 */
int radeon_driver_load(struct drm_device *dev, unsigned long flags)
{
	drm_radeon_private_t *dev_priv;
	int ret = 0;

	dev_priv = drm_alloc(sizeof(drm_radeon_private_t), DRM_MEM_DRIVER);
	if (dev_priv == NULL)
		return -ENOMEM;

	memset(dev_priv, 0, sizeof(drm_radeon_private_t));
	dev->dev_private = (void *)dev_priv;
	dev_priv->flags = flags;

	switch (flags & RADEON_FAMILY_MASK) {
	case CHIP_R100:
	case CHIP_RV200:
	case CHIP_R200:
	case CHIP_R300:
	case CHIP_R350:
	case CHIP_R420:
	case CHIP_R423:
	case CHIP_RV410:
	case CHIP_RV515:
	case CHIP_R520:
	case CHIP_RV570:
	case CHIP_R580:
		dev_priv->flags |= RADEON_HAS_HIERZ;
		break;
	default:
		/* all other chips have no hierarchical z buffer */
		break;
	}

	dev_priv->chip_family = flags & RADEON_FAMILY_MASK;
	if (drm_device_is_agp(dev))
		dev_priv->flags |= RADEON_IS_AGP;
	else if (drm_device_is_pcie(dev))
		dev_priv->flags |= RADEON_IS_PCIE;
	else
		dev_priv->flags |= RADEON_IS_PCI;

	ret = drm_vblank_init(dev, 2);
	if (ret) {
		radeon_driver_unload(dev);
		return ret;
	}

	DRM_DEBUG("%s card detected\n",
		  ((dev_priv->flags & RADEON_IS_AGP) ? "AGP" : (((dev_priv->flags & RADEON_IS_PCIE) ? "PCIE" : "PCI"))));
	return ret;
}

/* Create mappings for registers and framebuffer so userland doesn't necessarily
 * have to find them.
 */
int radeon_driver_firstopen(struct drm_device *dev)
{
	int ret;
	drm_local_map_t *map;
	drm_radeon_private_t *dev_priv = dev->dev_private;

	dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE;

	/* PCI resource 2 holds the register aperture. */
	ret = drm_addmap(dev, drm_get_resource_start(dev, 2),
			 drm_get_resource_len(dev, 2), _DRM_REGISTERS,
			 _DRM_READ_ONLY, &dev_priv->mmio);
	if (ret != 0)
		return ret;

	/* PCI resource 0 holds the framebuffer aperture; the map handle
	 * is deliberately discarded -- the map exists only so userland
	 * can look it up later.
	 */
	dev_priv->fb_aper_offset = drm_get_resource_start(dev, 0);
	ret = drm_addmap(dev, dev_priv->fb_aper_offset,
			 drm_get_resource_len(dev, 0), _DRM_FRAME_BUFFER,
			 _DRM_WRITE_COMBINING, &map);
	if (ret != 0)
		return ret;

	return 0;
}

/* Driver unload hook: free the private struct allocated in
 * radeon_driver_load().
 */
int radeon_driver_unload(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;

	DRM_DEBUG("\n");
	drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);

	dev->dev_private = NULL;
	return 0;
}