/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/10.3/sys/dev/drm2/radeon/r300.c 282199 2015-04-28 19:35:05Z dumbbell $");

#include <dev/drm2/drmP.h>
#include <dev/drm2/drm.h>
#include <dev/drm2/drm_crtc_helper.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
#include <dev/drm2/radeon/radeon_drm.h>
#include "r100_track.h"
#include "r300d.h"
#include "rv350d.h"
#include "r300_reg_safe.h"

/* This file gathers functions specific to: r300,r350,rv350,rv370,rv380
 *
 * GPU Errata:
 * - HOST_PATH_CNTL: the r300 family seems to dislike writing to
 *   HOST_PATH_CNTL over MMIO to flush the host path read cache; this leads
 *   to a HARDLOCKUP. However, scheduling such a write on the ring seems
 *   harmless. I suspect the CP read collides with the flush somehow, or
 *   maybe the MC, hard to tell. (Jerome Glisse)
 */
/*
 * rv370,rv380 PCIE GART
 */
static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev);

void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
    uint32_t tmp;
    int i;

    /* Workaround for a HW bug: do the flush 2 times */
    for (i = 0; i < 2; i++) {
        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
        WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp | RADEON_PCIE_TX_GART_INVALIDATE_TLB);
        (void)RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
        WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
    }
    mb();
}

#define R300_PTE_WRITEABLE (1 << 2)
#define R300_PTE_READABLE  (1 << 3)

int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
    volatile uint32_t *ptr = rdev->gart.ptr;

    /* Valid page indices are 0 .. num_gpu_pages - 1. */
    if (i < 0 || i >= rdev->gart.num_gpu_pages) {
        return -EINVAL;
    }
    addr = (lower_32_bits(addr) >> 8) |
           ((upper_32_bits(addr) & 0xff) << 24) |
           R300_PTE_WRITEABLE | R300_PTE_READABLE;
    /* On x86 we want this to be CPU endian; on powerpc without HW swappers
     * it'll get swapped on the way into VRAM, so there is no need for
     * cpu_to_le32 on VRAM tables. */
    ptr += i;
    *ptr = (uint32_t)addr;
    return 0;
}
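
/*
 * Illustrative example (hypothetical address, not from the original code):
 * with the packing in rv370_pcie_gart_set_page() above, a 40-bit bus
 * address of 0x1_2345_6000 becomes
 *     (lower_32_bits(addr) >> 8)           = 0x00234560
 *     ((upper_32_bits(addr) & 0xff) << 24) = 0x01000000
 * so the PTE written is 0x01234560 | R300_PTE_WRITEABLE | R300_PTE_READABLE
 * = 0x0123456C: bits 23:0 carry address bits 31:8 and bits 31:24 carry
 * address bits 39:32, which works because page addresses have their low
 * 8 bits clear.
 */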

int rv370_pcie_gart_init(struct radeon_device *rdev)
{
    int r;

    if (rdev->gart.robj) {
        DRM_ERROR("RV370 PCIE GART already initialized\n");
        return 0;
    }
    /* Initialize common gart structure */
    r = radeon_gart_init(rdev);
    if (r)
        return r;
    r = rv370_debugfs_pcie_gart_info_init(rdev);
    if (r)
        DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
    rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
    rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
    rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
    return radeon_gart_table_vram_alloc(rdev);
}

int rv370_pcie_gart_enable(struct radeon_device *rdev)
{
    uint32_t table_addr;
    uint32_t tmp;
    int r;

    if (rdev->gart.robj == NULL) {
        dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
        return -EINVAL;
    }
    r = radeon_gart_table_vram_pin(rdev);
    if (r)
        return r;
    radeon_gart_restore(rdev);
    /* discard memory requests outside of the configured range */
    tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
    WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
    WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_start);
    tmp = rdev->mc.gtt_end & ~RADEON_GPU_PAGE_MASK;
    WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp);
    WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
    WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
    table_addr = rdev->gart.table_addr;
    WREG32_PCIE(RADEON_PCIE_TX_GART_BASE, table_addr);
    /* FIXME: setup default page */
    WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_start);
    WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0);
    /* Clear error */
    WREG32_PCIE(RADEON_PCIE_TX_GART_ERROR, 0);
    tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
    tmp |= RADEON_PCIE_TX_GART_EN;
    tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
    WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
    rv370_pcie_gart_tlb_flush(rdev);
    DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
             (unsigned)(rdev->mc.gtt_size >> 20),
             (unsigned long long)table_addr);
    rdev->gart.ready = true;
    return 0;
}

void rv370_pcie_gart_disable(struct radeon_device *rdev)
{
    u32 tmp;

    WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, 0);
    WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, 0);
    WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
    WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
    tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
    tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
    WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
    radeon_gart_table_vram_unpin(rdev);
}

void rv370_pcie_gart_fini(struct radeon_device *rdev)
{
    radeon_gart_fini(rdev);
    rv370_pcie_gart_disable(rdev);
    radeon_gart_table_vram_free(rdev);
}

void r300_fence_ring_emit(struct radeon_device *rdev,
                          struct radeon_fence *fence)
{
    struct radeon_ring *ring = &rdev->ring[fence->ring];

    /* Whoever calls radeon_fence_emit should call ring_lock and ask
     * for enough space (today the callers are ib schedule and buffer move) */
    /* Write SC register so SC & US assert idle */
    radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_TL, 0));
    radeon_ring_write(ring, 0);
    radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_BR, 0));
    radeon_ring_write(ring, 0);
    /* Flush 3D cache */
    radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
    radeon_ring_write(ring, R300_RB3D_DC_FLUSH);
    radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
    radeon_ring_write(ring, R300_ZC_FLUSH);
    /* Wait until IDLE & CLEAN */
    radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
    radeon_ring_write(ring, (RADEON_WAIT_3D_IDLECLEAN |
                             RADEON_WAIT_2D_IDLECLEAN |
                             RADEON_WAIT_DMA_GUI_IDLE));
    radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
    radeon_ring_write(ring, rdev->config.r300.hdp_cntl |
                            RADEON_HDP_READ_BUFFER_INVALIDATE);
    radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
    radeon_ring_write(ring, rdev->config.r300.hdp_cntl);
    /* Emit fence sequence & fire IRQ */
    radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
    radeon_ring_write(ring, fence->seq);
    radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0));
    radeon_ring_write(ring, RADEON_SW_INT_FIRE);
}
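
/*
 * Note on the HOST_PATH_CNTL writes above: they are deliberately emitted
 * through the ring rather than via MMIO. Per the errata note at the top of
 * this file, flushing the HDP read cache with an MMIO write can hard-lock
 * the r300 family, while scheduling the same write on the ring appears to
 * be harmless.
 */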

void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
{
    unsigned gb_tile_config;
    int r;

    /* Sub-pixel precision of 1/12, so we can have 4K rendering according to doc */
    gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
    switch (rdev->num_gb_pipes) {
    case 2:
        gb_tile_config |= R300_PIPE_COUNT_R300;
        break;
    case 3:
        gb_tile_config |= R300_PIPE_COUNT_R420_3P;
        break;
    case 4:
        gb_tile_config |= R300_PIPE_COUNT_R420;
        break;
    case 1:
    default:
        gb_tile_config |= R300_PIPE_COUNT_RV350;
        break;
    }

    r = radeon_ring_lock(rdev, ring, 64);
    if (r) {
        return;
    }
    radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0));
    radeon_ring_write(ring,
                      RADEON_ISYNC_ANY2D_IDLE3D |
                      RADEON_ISYNC_ANY3D_IDLE2D |
                      RADEON_ISYNC_WAIT_IDLEGUI |
                      RADEON_ISYNC_CPSCRATCH_IDLEGUI);
    radeon_ring_write(ring, PACKET0(R300_GB_TILE_CONFIG, 0));
    radeon_ring_write(ring, gb_tile_config);
    radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
    radeon_ring_write(ring,
                      RADEON_WAIT_2D_IDLECLEAN |
                      RADEON_WAIT_3D_IDLECLEAN);
    radeon_ring_write(ring, PACKET0(R300_DST_PIPE_CONFIG, 0));
    radeon_ring_write(ring, R300_PIPE_AUTO_CONFIG);
    radeon_ring_write(ring, PACKET0(R300_GB_SELECT, 0));
    radeon_ring_write(ring, 0);
    radeon_ring_write(ring, PACKET0(R300_GB_ENABLE, 0));
    radeon_ring_write(ring, 0);
    radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
    radeon_ring_write(ring, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
    radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
    radeon_ring_write(ring, R300_ZC_FLUSH | R300_ZC_FREE);
    radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
    radeon_ring_write(ring,
                      RADEON_WAIT_2D_IDLECLEAN |
                      RADEON_WAIT_3D_IDLECLEAN);
    radeon_ring_write(ring, PACKET0(R300_GB_AA_CONFIG, 0));
    radeon_ring_write(ring, 0);
    radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
    radeon_ring_write(ring, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
    radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
    radeon_ring_write(ring, R300_ZC_FLUSH | R300_ZC_FREE);
    radeon_ring_write(ring, PACKET0(R300_GB_MSPOS0, 0));
    radeon_ring_write(ring,
                      ((6 << R300_MS_X0_SHIFT) |
                       (6 << R300_MS_Y0_SHIFT) |
                       (6 << R300_MS_X1_SHIFT) |
                       (6 << R300_MS_Y1_SHIFT) |
                       (6 << R300_MS_X2_SHIFT) |
                       (6 << R300_MS_Y2_SHIFT) |
                       (6 << R300_MSBD0_Y_SHIFT) |
                       (6 << R300_MSBD0_X_SHIFT)));
    radeon_ring_write(ring, PACKET0(R300_GB_MSPOS1, 0));
    radeon_ring_write(ring,
                      ((6 << R300_MS_X3_SHIFT) |
                       (6 << R300_MS_Y3_SHIFT) |
                       (6 << R300_MS_X4_SHIFT) |
                       (6 << R300_MS_Y4_SHIFT) |
                       (6 << R300_MS_X5_SHIFT) |
                       (6 << R300_MS_Y5_SHIFT) |
                       (6 << R300_MSBD1_SHIFT)));
    radeon_ring_write(ring, PACKET0(R300_GA_ENHANCE, 0));
    radeon_ring_write(ring, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
    radeon_ring_write(ring, PACKET0(R300_GA_POLY_MODE, 0));
    radeon_ring_write(ring,
                      R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE);
    radeon_ring_write(ring, PACKET0(R300_GA_ROUND_MODE, 0));
    radeon_ring_write(ring,
                      R300_GEOMETRY_ROUND_NEAREST |
                      R300_COLOR_ROUND_NEAREST);
    radeon_ring_unlock_commit(rdev, ring);
}
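
/*
 * Aside on the GB_MSPOS0/1 writes above: every multisample position field
 * is programmed to 6. Given the 1/12 sub-pixel precision selected via
 * GB_TILE_CONFIG, 6/12 would place each sample at the pixel center; that
 * reading is an interpretation, the authoritative layout is the
 * R300_MS_*_SHIFT macros.
 */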

static void r300_errata(struct radeon_device *rdev)
{
    rdev->pll_errata = 0;

    if (rdev->family == CHIP_R300 &&
        (RREG32(RADEON_CONFIG_CNTL) & RADEON_CFG_ATI_REV_ID_MASK) == RADEON_CFG_ATI_REV_A11) {
        rdev->pll_errata |= CHIP_ERRATA_R300_CG;
    }
}

int r300_mc_wait_for_idle(struct radeon_device *rdev)
{
    unsigned i;
    uint32_t tmp;

    for (i = 0; i < rdev->usec_timeout; i++) {
        /* read MC_STATUS */
        tmp = RREG32(RADEON_MC_STATUS);
        if (tmp & R300_MC_IDLE) {
            return 0;
        }
        DRM_UDELAY(1);
    }
    return -1;
}

static void r300_gpu_init(struct radeon_device *rdev)
{
    uint32_t gb_tile_config, tmp;

    if ((rdev->family == CHIP_R300 && rdev->ddev->pci_device != 0x4144) ||
        (rdev->family == CHIP_R350 && rdev->ddev->pci_device != 0x4148)) {
        /* r300,r350 */
        rdev->num_gb_pipes = 2;
    } else {
        /* rv350,rv370,rv380,r300 AD, r350 AH */
        rdev->num_gb_pipes = 1;
    }
    rdev->num_z_pipes = 1;
    gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
    switch (rdev->num_gb_pipes) {
    case 2:
        gb_tile_config |= R300_PIPE_COUNT_R300;
        break;
    case 3:
        gb_tile_config |= R300_PIPE_COUNT_R420_3P;
        break;
    case 4:
        gb_tile_config |= R300_PIPE_COUNT_R420;
        break;
    default:
    case 1:
        gb_tile_config |= R300_PIPE_COUNT_RV350;
        break;
    }
    WREG32(R300_GB_TILE_CONFIG, gb_tile_config);

    if (r100_gui_wait_for_idle(rdev)) {
        DRM_ERROR("Failed to wait GUI idle while "
                  "programming pipes. Bad things might happen.\n");
    }

    tmp = RREG32(R300_DST_PIPE_CONFIG);
    WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG);

    WREG32(R300_RB2D_DSTCACHE_MODE,
           R300_DC_AUTOFLUSH_ENABLE |
           R300_DC_DC_DISABLE_IGNORE_PE);

    if (r100_gui_wait_for_idle(rdev)) {
        DRM_ERROR("Failed to wait GUI idle while "
                  "programming pipes. Bad things might happen.\n");
    }
    if (r300_mc_wait_for_idle(rdev)) {
        DRM_ERROR("Failed to wait MC idle while "
                  "programming pipes. Bad things might happen.\n");
    }
    DRM_INFO("radeon: %d quad pipes, %d Z pipes initialized.\n",
             rdev->num_gb_pipes, rdev->num_z_pipes);
}

int r300_asic_reset(struct radeon_device *rdev)
{
    struct r100_mc_save save;
    u32 status, tmp;
    int ret = 0;

    status = RREG32(R_000E40_RBBM_STATUS);
    if (!G_000E40_GUI_ACTIVE(status)) {
        return 0;
    }
    r100_mc_stop(rdev, &save);
    status = RREG32(R_000E40_RBBM_STATUS);
    dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
    /* stop CP */
    WREG32(RADEON_CP_CSQ_CNTL, 0);
    tmp = RREG32(RADEON_CP_RB_CNTL);
    WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
    WREG32(RADEON_CP_RB_RPTR_WR, 0);
    WREG32(RADEON_CP_RB_WPTR, 0);
    WREG32(RADEON_CP_RB_CNTL, tmp);
    /* save PCI state */
    pci_save_state(device_get_parent(rdev->dev));
    /* disable bus mastering */
    r100_bm_disable(rdev);
    WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
                                     S_0000F0_SOFT_RESET_GA(1));
    RREG32(R_0000F0_RBBM_SOFT_RESET);
    mdelay(500);
    WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
    mdelay(1);
    status = RREG32(R_000E40_RBBM_STATUS);
    dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
    /* Resetting the CP seems to be problematic: sometimes it ends up
     * hard-locking the computer, but it's necessary for a successful
     * reset. More testing & playing is needed on R3XX/R4XX to find a
     * reliable solution (if any).
     */
    WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
    RREG32(R_0000F0_RBBM_SOFT_RESET);
    mdelay(500);
    WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
    mdelay(1);
    status = RREG32(R_000E40_RBBM_STATUS);
    dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
    /* restore PCI & busmastering */
    pci_restore_state(device_get_parent(rdev->dev));
    r100_enable_bm(rdev);
    /* Check if GPU is idle */
    if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
        dev_err(rdev->dev, "failed to reset GPU\n");
        ret = -1;
    } else
        dev_info(rdev->dev, "GPU reset succeeded\n");
    r100_mc_resume(rdev, &save);
    return ret;
}

/*
 * r300,r350,rv350,rv380 VRAM info
 */
void r300_mc_init(struct radeon_device *rdev)
{
    u64 base;
    u32 tmp;

    /* DDR for all cards after R300 & IGP */
    rdev->mc.vram_is_ddr = true;
    tmp = RREG32(RADEON_MEM_CNTL);
    tmp &= R300_MEM_NUM_CHANNELS_MASK;
    switch (tmp) {
    case 0: rdev->mc.vram_width = 64; break;
    case 1: rdev->mc.vram_width = 128; break;
    case 2: rdev->mc.vram_width = 256; break;
    default: rdev->mc.vram_width = 128; break;
    }
    r100_vram_init_sizes(rdev);
    base = rdev->mc.aper_base;
    if (rdev->flags & RADEON_IS_IGP)
        base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
    radeon_vram_location(rdev, &rdev->mc, base);
    rdev->mc.gtt_base_align = 0;
    if (!(rdev->flags & RADEON_IS_AGP))
        radeon_gtt_location(rdev, &rdev->mc);
    radeon_update_bandwidth_info(rdev);
}
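
/*
 * Illustrative example (hypothetical register value): on IGPs the low 16
 * bits of RADEON_NB_TOM appear to hold the start of the stolen system RAM
 * block in 64KB units, so if those bits read back as 0x7800 the computation
 * above yields base = 0x7800 << 16 = 0x78000000. Only the shift is taken
 * from the code; the field interpretation is inferred from this use.
 */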

void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
{
    uint32_t link_width_cntl, mask;

    if (rdev->flags & RADEON_IS_IGP)
        return;

    if (!(rdev->flags & RADEON_IS_PCIE))
        return;

    /* FIXME wait for idle */

    switch (lanes) {
    case 0:
        mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
        break;
    case 1:
        mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
        break;
    case 2:
        mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
        break;
    case 4:
        mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
        break;
    case 8:
        mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
        break;
    case 12:
        mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
        break;
    case 16:
    default:
        mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
        break;
    }

    link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

    if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
        (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
        return;

    link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
                         RADEON_PCIE_LC_RECONFIG_NOW |
                         RADEON_PCIE_LC_RECONFIG_LATER |
                         RADEON_PCIE_LC_SHORT_RECONFIG_EN);
    link_width_cntl |= mask;
    WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
    WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
                                                 RADEON_PCIE_LC_RECONFIG_NOW));

    /* wait for lane set to complete */
    link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
    while (link_width_cntl == 0xffffffff)
        link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
}

int rv370_get_pcie_lanes(struct radeon_device *rdev)
{
    u32 link_width_cntl;

    if (rdev->flags & RADEON_IS_IGP)
        return 0;

    if (!(rdev->flags & RADEON_IS_PCIE))
        return 0;

    /* FIXME wait for idle */

    link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

    switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
    case RADEON_PCIE_LC_LINK_WIDTH_X0:
        return 0;
    case RADEON_PCIE_LC_LINK_WIDTH_X1:
        return 1;
    case RADEON_PCIE_LC_LINK_WIDTH_X2:
        return 2;
    case RADEON_PCIE_LC_LINK_WIDTH_X4:
        return 4;
    case RADEON_PCIE_LC_LINK_WIDTH_X8:
        return 8;
    case RADEON_PCIE_LC_LINK_WIDTH_X16:
    default:
        return 16;
    }
}
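
/*
 * A note on the reconfiguration handshake in rv370_set_pcie_lanes() above:
 * the new width is first written with all RECONFIG bits cleared, then
 * written again with RADEON_PCIE_LC_RECONFIG_NOW set to trigger the change,
 * after which the register is polled until it stops reading back as
 * 0xffffffff (presumably what is returned while the link is retraining).
 */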

#if defined(CONFIG_DEBUG_FS)
static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data)
{
    struct drm_info_node *node = (struct drm_info_node *) m->private;
    struct drm_device *dev = node->minor->dev;
    struct radeon_device *rdev = dev->dev_private;
    uint32_t tmp;

    tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
    seq_printf(m, "PCIE_TX_GART_CNTL 0x%08x\n", tmp);
    tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_BASE);
    seq_printf(m, "PCIE_TX_GART_BASE 0x%08x\n", tmp);
    tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_LO);
    seq_printf(m, "PCIE_TX_GART_START_LO 0x%08x\n", tmp);
    tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_HI);
    seq_printf(m, "PCIE_TX_GART_START_HI 0x%08x\n", tmp);
    tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_LO);
    seq_printf(m, "PCIE_TX_GART_END_LO 0x%08x\n", tmp);
    tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_HI);
    seq_printf(m, "PCIE_TX_GART_END_HI 0x%08x\n", tmp);
    tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_ERROR);
    seq_printf(m, "PCIE_TX_GART_ERROR 0x%08x\n", tmp);
    return 0;
}

static struct drm_info_list rv370_pcie_gart_info_list[] = {
    {"rv370_pcie_gart_info", rv370_debugfs_pcie_gart_info, 0, NULL},
};
#endif

static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
    return radeon_debugfs_add_files(rdev, rv370_pcie_gart_info_list, 1);
#else
    return 0;
#endif
}

static int r300_packet0_check(struct radeon_cs_parser *p,
                              struct radeon_cs_packet *pkt,
                              unsigned idx, unsigned reg)
{
    struct radeon_cs_reloc *reloc;
    struct r100_cs_track *track;
    volatile uint32_t *ib;
    uint32_t tmp, tile_flags = 0;
    unsigned i;
    int r;
    u32 idx_value;

    ib = p->ib.ptr;
    track = (struct r100_cs_track *)p->track;
    idx_value = radeon_get_ib_value(p, idx);

    switch (reg) {
    case AVIVO_D1MODE_VLINE_START_END:
    case RADEON_CRTC_GUI_TRIG_VLINE:
        r = r100_cs_packet_parse_vline(p);
        if (r) {
            DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                      idx, reg);
            r100_cs_dump_packet(p, pkt);
            return r;
        }
        break;
    case RADEON_DST_PITCH_OFFSET:
    case RADEON_SRC_PITCH_OFFSET:
        r = r100_reloc_pitch_offset(p, pkt, idx, reg);
        if (r)
            return r;
        break;
    case R300_RB3D_COLOROFFSET0:
    case R300_RB3D_COLOROFFSET1:
    case R300_RB3D_COLOROFFSET2:
    case R300_RB3D_COLOROFFSET3:
        i = (reg - R300_RB3D_COLOROFFSET0) >> 2;
        r = r100_cs_packet_next_reloc(p, &reloc);
        if (r) {
            DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                      idx, reg);
            r100_cs_dump_packet(p, pkt);
            return r;
        }
        track->cb[i].robj = reloc->robj;
        track->cb[i].offset = idx_value;
        track->cb_dirty = true;
        ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
        break;
    case R300_ZB_DEPTHOFFSET:
        r = r100_cs_packet_next_reloc(p, &reloc);
        if (r) {
            DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                      idx, reg);
            r100_cs_dump_packet(p, pkt);
            return r;
        }
        track->zb.robj = reloc->robj;
        track->zb.offset = idx_value;
        track->zb_dirty = true;
        ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
        break;
    case R300_TX_OFFSET_0:
    case R300_TX_OFFSET_0+4:
    case R300_TX_OFFSET_0+8:
    case R300_TX_OFFSET_0+12:
    case R300_TX_OFFSET_0+16:
    case R300_TX_OFFSET_0+20:
    case R300_TX_OFFSET_0+24:
    case R300_TX_OFFSET_0+28:
    case R300_TX_OFFSET_0+32:
    case R300_TX_OFFSET_0+36:
    case R300_TX_OFFSET_0+40:
    case R300_TX_OFFSET_0+44:
    case R300_TX_OFFSET_0+48:
    case R300_TX_OFFSET_0+52:
    case R300_TX_OFFSET_0+56:
    case R300_TX_OFFSET_0+60:
        i = (reg - R300_TX_OFFSET_0) >> 2;
        r = r100_cs_packet_next_reloc(p, &reloc);
        if (r) {
            DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                      idx, reg);
            r100_cs_dump_packet(p, pkt);
            return r;
        }

        if (p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) {
            ib[idx] = (idx_value & 31) | /* keep the 1st 5 bits */
                      ((idx_value & ~31) + (u32)reloc->lobj.gpu_offset);
        } else {
            if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
                tile_flags |= R300_TXO_MACRO_TILE;
            if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
                tile_flags |= R300_TXO_MICRO_TILE;
            else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
                tile_flags |= R300_TXO_MICRO_TILE_SQUARE;

            tmp = idx_value + ((u32)reloc->lobj.gpu_offset);
            tmp |= tile_flags;
            ib[idx] = tmp;
        }
        track->textures[i].robj = reloc->robj;
        track->tex_dirty = true;
        break;
    /* Tracked registers */
    case 0x2084:
        /* VAP_VF_CNTL */
        track->vap_vf_cntl = idx_value;
        break;
    case 0x20B4:
        /* VAP_VTX_SIZE */
        track->vtx_size = idx_value & 0x7F;
        break;
    case 0x2134:
        /* VAP_VF_MAX_VTX_INDX */
        track->max_indx = idx_value & 0x00FFFFFFUL;
        break;
    case 0x2088:
        /* VAP_ALT_NUM_VERTICES - only valid on r500 */
        if (p->rdev->family < CHIP_RV515)
            goto fail;
        track->vap_alt_nverts = idx_value & 0xFFFFFF;
        break;
    case 0x43E4:
        /* SC_SCISSOR1 */
        track->maxy = ((idx_value >> 13) & 0x1FFF) + 1;
        if (p->rdev->family < CHIP_RV515) {
            track->maxy -= 1440;
        }
        track->cb_dirty = true;
        track->zb_dirty = true;
        break;
    case 0x4E00:
        /* RB3D_CCTL */
        if ((idx_value & (1 << 10)) && /* CMASK_ENABLE */
            p->rdev->cmask_filp != p->filp) {
            DRM_ERROR("Invalid RB3D_CCTL: Cannot enable CMASK.\n");
            return -EINVAL;
        }
        track->num_cb = ((idx_value >> 5) & 0x3) + 1;
        track->cb_dirty = true;
        break;
    case 0x4E38:
    case 0x4E3C:
    case 0x4E40:
    case 0x4E44:
        /* RB3D_COLORPITCH0 */
        /* RB3D_COLORPITCH1 */
        /* RB3D_COLORPITCH2 */
        /* RB3D_COLORPITCH3 */
        if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
            r = r100_cs_packet_next_reloc(p, &reloc);
            if (r) {
                DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                          idx, reg);
                r100_cs_dump_packet(p, pkt);
                return r;
            }

            if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
                tile_flags |= R300_COLOR_TILE_ENABLE;
            if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
                tile_flags |= R300_COLOR_MICROTILE_ENABLE;
            else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
                tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE;

            tmp = idx_value & ~(0x7 << 16);
            tmp |= tile_flags;
            ib[idx] = tmp;
        }
        i = (reg - 0x4E38) >> 2;
        track->cb[i].pitch = idx_value & 0x3FFE;
        switch (((idx_value >> 21) & 0xF)) {
        case 9:
        case 11:
        case 12:
            track->cb[i].cpp = 1;
            break;
        case 3:
        case 4:
        case 13:
        case 15:
            track->cb[i].cpp = 2;
            break;
        case 5:
            if (p->rdev->family < CHIP_RV515) {
                DRM_ERROR("Invalid color buffer format (%d)!\n",
                          ((idx_value >> 21) & 0xF));
                return -EINVAL;
            }
            /* Pass through. */
        case 6:
            track->cb[i].cpp = 4;
            break;
        case 10:
            track->cb[i].cpp = 8;
            break;
        case 7:
            track->cb[i].cpp = 16;
            break;
        default:
            DRM_ERROR("Invalid color buffer format (%d) !\n",
                      ((idx_value >> 21) & 0xF));
            return -EINVAL;
        }
        track->cb_dirty = true;
        break;
    case 0x4F00:
        /* ZB_CNTL */
        if (idx_value & 2) {
            track->z_enabled = true;
        } else {
            track->z_enabled = false;
        }
        track->zb_dirty = true;
        break;
    case 0x4F10:
        /* ZB_FORMAT */
        switch ((idx_value & 0xF)) {
        case 0:
        case 1:
            track->zb.cpp = 2;
            break;
        case 2:
            track->zb.cpp = 4;
            break;
        default:
            DRM_ERROR("Invalid z buffer format (%d) !\n",
                      (idx_value & 0xF));
            return -EINVAL;
        }
        track->zb_dirty = true;
        break;
    case 0x4F24:
        /* ZB_DEPTHPITCH */
        if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
            r = r100_cs_packet_next_reloc(p, &reloc);
            if (r) {
                DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                          idx, reg);
                r100_cs_dump_packet(p, pkt);
                return r;
            }

            if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
                tile_flags |= R300_DEPTHMACROTILE_ENABLE;
            if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
                tile_flags |= R300_DEPTHMICROTILE_TILED;
            else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
                tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;

            tmp = idx_value & ~(0x7 << 16);
            tmp |= tile_flags;
            ib[idx] = tmp;
        }
        track->zb.pitch = idx_value & 0x3FFC;
        track->zb_dirty = true;
        break;
    case 0x4104:
        /* TX_ENABLE */
        for (i = 0; i < 16; i++) {
            bool enabled;

            enabled = !!(idx_value & (1 << i));
            track->textures[i].enabled = enabled;
        }
        track->tex_dirty = true;
        break;
    case 0x44C0:
    case 0x44C4:
    case 0x44C8:
    case 0x44CC:
    case 0x44D0:
    case 0x44D4:
    case 0x44D8:
    case 0x44DC:
    case 0x44E0:
    case 0x44E4:
    case 0x44E8:
    case 0x44EC:
    case 0x44F0:
    case 0x44F4:
    case 0x44F8:
    case 0x44FC:
        /* TX_FORMAT1_[0-15] */
        i = (reg - 0x44C0) >> 2;
        tmp = (idx_value >> 25) & 0x3;
        track->textures[i].tex_coord_type = tmp;
        switch ((idx_value & 0x1F)) {
        case R300_TX_FORMAT_X8:
        case R300_TX_FORMAT_Y4X4:
        case R300_TX_FORMAT_Z3Y3X2:
            track->textures[i].cpp = 1;
            track->textures[i].compress_format = R100_TRACK_COMP_NONE;
            break;
        case R300_TX_FORMAT_X16:
        case R300_TX_FORMAT_FL_I16:
        case R300_TX_FORMAT_Y8X8:
        case R300_TX_FORMAT_Z5Y6X5:
        case R300_TX_FORMAT_Z6Y5X5:
        case R300_TX_FORMAT_W4Z4Y4X4:
        case R300_TX_FORMAT_W1Z5Y5X5:
        case R300_TX_FORMAT_D3DMFT_CxV8U8:
        case R300_TX_FORMAT_B8G8_B8G8:
        case R300_TX_FORMAT_G8R8_G8B8:
            track->textures[i].cpp = 2;
            track->textures[i].compress_format = R100_TRACK_COMP_NONE;
            break;
        case R300_TX_FORMAT_Y16X16:
        case R300_TX_FORMAT_FL_I16A16:
        case R300_TX_FORMAT_Z11Y11X10:
        case R300_TX_FORMAT_Z10Y11X11:
        case R300_TX_FORMAT_W8Z8Y8X8:
        case R300_TX_FORMAT_W2Z10Y10X10:
        case 0x17:
        case R300_TX_FORMAT_FL_I32:
        case 0x1e:
            track->textures[i].cpp = 4;
            track->textures[i].compress_format = R100_TRACK_COMP_NONE;
            break;
        case R300_TX_FORMAT_W16Z16Y16X16:
        case R300_TX_FORMAT_FL_R16G16B16A16:
        case R300_TX_FORMAT_FL_I32A32:
            track->textures[i].cpp = 8;
            track->textures[i].compress_format = R100_TRACK_COMP_NONE;
            break;
        case R300_TX_FORMAT_FL_R32G32B32A32:
            track->textures[i].cpp = 16;
            track->textures[i].compress_format = R100_TRACK_COMP_NONE;
            break;
        case R300_TX_FORMAT_DXT1:
            track->textures[i].cpp = 1;
            track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
            break;
        case R300_TX_FORMAT_ATI2N:
            if (p->rdev->family < CHIP_R420) {
                DRM_ERROR("Invalid texture format %u\n",
                          (idx_value & 0x1F));
                return -EINVAL;
            }
            /* The same rules apply as for DXT3/5. */
            /* Pass through. */
        case R300_TX_FORMAT_DXT3:
        case R300_TX_FORMAT_DXT5:
            track->textures[i].cpp = 1;
            track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
            break;
        default:
            DRM_ERROR("Invalid texture format %u\n",
                      (idx_value & 0x1F));
            return -EINVAL;
        }
        track->tex_dirty = true;
        break;
    case 0x4400:
    case 0x4404:
    case 0x4408:
    case 0x440C:
    case 0x4410:
    case 0x4414:
    case 0x4418:
    case 0x441C:
    case 0x4420:
    case 0x4424:
    case 0x4428:
    case 0x442C:
    case 0x4430:
    case 0x4434:
    case 0x4438:
    case 0x443C:
        /* TX_FILTER0_[0-15] */
        i = (reg - 0x4400) >> 2;
        tmp = idx_value & 0x7;
        if (tmp == 2 || tmp == 4 || tmp == 6) {
            track->textures[i].roundup_w = false;
        }
        tmp = (idx_value >> 3) & 0x7;
        if (tmp == 2 || tmp == 4 || tmp == 6) {
            track->textures[i].roundup_h = false;
        }
        track->tex_dirty = true;
        break;
    case 0x4500:
    case 0x4504:
    case 0x4508:
    case 0x450C:
    case 0x4510:
    case 0x4514:
    case 0x4518:
    case 0x451C:
    case 0x4520:
    case 0x4524:
    case 0x4528:
    case 0x452C:
    case 0x4530:
    case 0x4534:
    case 0x4538:
    case 0x453C:
        /* TX_FORMAT2_[0-15] */
        i = (reg - 0x4500) >> 2;
        tmp = idx_value & 0x3FFF;
        track->textures[i].pitch = tmp + 1;
        if (p->rdev->family >= CHIP_RV515) {
            tmp = ((idx_value >> 15) & 1) << 11;
            track->textures[i].width_11 = tmp;
            tmp = ((idx_value >> 16) & 1) << 11;
            track->textures[i].height_11 = tmp;

            /* ATI1N */
            if (idx_value & (1 << 14)) {
                /* The same rules apply as for DXT1. */
                track->textures[i].compress_format =
                    R100_TRACK_COMP_DXT1;
            }
        } else if (idx_value & (1 << 14)) {
            DRM_ERROR("Forbidden bit TXFORMAT_MSB\n");
            return -EINVAL;
        }
        track->tex_dirty = true;
        break;
    case 0x4480:
    case 0x4484:
    case 0x4488:
    case 0x448C:
    case 0x4490:
    case 0x4494:
    case 0x4498:
    case 0x449C:
    case 0x44A0:
    case 0x44A4:
    case 0x44A8:
    case 0x44AC:
    case 0x44B0:
    case 0x44B4:
    case 0x44B8:
    case 0x44BC:
        /* TX_FORMAT0_[0-15] */
        i = (reg - 0x4480) >> 2;
        tmp = idx_value & 0x7FF;
        track->textures[i].width = tmp + 1;
        tmp = (idx_value >> 11) & 0x7FF;
        track->textures[i].height = tmp + 1;
        tmp = (idx_value >> 26) & 0xF;
        track->textures[i].num_levels = tmp;
        tmp = idx_value & (1U << 31);
        track->textures[i].use_pitch = !!tmp;
        tmp = (idx_value >> 22) & 0xF;
        track->textures[i].txdepth = tmp;
        track->tex_dirty = true;
        break;
    case R300_ZB_ZPASS_ADDR:
        r = r100_cs_packet_next_reloc(p, &reloc);
        if (r) {
            DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                      idx, reg);
            r100_cs_dump_packet(p, pkt);
            return r;
        }
        ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
        break;
    case 0x4e0c:
        /* RB3D_COLOR_CHANNEL_MASK */
        track->color_channel_mask = idx_value;
        track->cb_dirty = true;
        break;
    case 0x43a4:
        /* SC_HYPERZ_EN */
        /* r300c emits this register - we need to disable hyperz for it
         * without complaining */
        if (p->rdev->hyperz_filp != p->filp) {
            if (idx_value & 0x1)
                ib[idx] = idx_value & ~1;
        }
        break;
    case 0x4f1c:
        /* ZB_BW_CNTL */
        track->zb_cb_clear = !!(idx_value & (1 << 5));
        track->cb_dirty = true;
        track->zb_dirty = true;
        if (p->rdev->hyperz_filp != p->filp) {
            if (idx_value & (R300_HIZ_ENABLE |
                             R300_RD_COMP_ENABLE |
                             R300_WR_COMP_ENABLE |
                             R300_FAST_FILL_ENABLE))
                goto fail;
        }
        break;
    case 0x4e04:
        /* RB3D_BLENDCNTL */
        track->blend_read_enable = !!(idx_value & (1 << 2));
        track->cb_dirty = true;
        break;
    case R300_RB3D_AARESOLVE_OFFSET:
        r = r100_cs_packet_next_reloc(p, &reloc);
        if (r) {
            DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                      idx, reg);
            r100_cs_dump_packet(p, pkt);
            return r;
        }
        track->aa.robj = reloc->robj;
        track->aa.offset = idx_value;
        track->aa_dirty = true;
        ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
        break;
    case R300_RB3D_AARESOLVE_PITCH:
        track->aa.pitch = idx_value & 0x3FFE;
        track->aa_dirty = true;
        break;
    case R300_RB3D_AARESOLVE_CTL:
        track->aaresolve = idx_value & 0x1;
        track->aa_dirty = true;
        break;
    case 0x4f30: /* ZB_MASK_OFFSET */
    case 0x4f34: /* ZB_ZMASK_PITCH */
    case 0x4f44: /* ZB_HIZ_OFFSET */
    case 0x4f54: /* ZB_HIZ_PITCH */
        if (idx_value && (p->rdev->hyperz_filp != p->filp))
            goto fail;
        break;
    case 0x4028:
        if (idx_value && (p->rdev->hyperz_filp != p->filp))
            goto fail;
        /* GB_Z_PEQ_CONFIG */
        if (p->rdev->family >= CHIP_RV350)
            break;
        goto fail;
    case 0x4be8:
        /* valid register only on RV530 */
        if (p->rdev->family == CHIP_RV530)
            break;
        /* fallthrough do not move */
    default:
        goto fail;
    }
    return 0;
fail:
    DRM_ERROR("Forbidden register 0x%04X in cs at %d (val=%08x)\n",
              reg, idx, idx_value);
    return -EINVAL;
}
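
/*
 * For reference (based on the classic Radeon CP packet encoding, which this
 * file does not restate): a PACKET0 header carries the packet type (00) in
 * bits 31:30, a count field in bits 29:16 (number of following dwords minus
 * one), and the target register offset divided by four in the low bits, so
 * r300_packet0_check() above is called once per register/value pair such a
 * packet writes.
 */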

static int r300_packet3_check(struct radeon_cs_parser *p,
                              struct radeon_cs_packet *pkt)
{
    struct radeon_cs_reloc *reloc;
    struct r100_cs_track *track;
    volatile uint32_t *ib;
    unsigned idx;
    int r;

    ib = p->ib.ptr;
    idx = pkt->idx + 1;
    track = (struct r100_cs_track *)p->track;
    switch (pkt->opcode) {
    case PACKET3_3D_LOAD_VBPNTR:
        r = r100_packet3_load_vbpntr(p, pkt, idx);
        if (r)
            return r;
        break;
    case PACKET3_INDX_BUFFER:
        r = r100_cs_packet_next_reloc(p, &reloc);
        if (r) {
            DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
            r100_cs_dump_packet(p, pkt);
            return r;
        }
        ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
        r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
        if (r) {
            return r;
        }
        break;
    /* Draw packet */
    case PACKET3_3D_DRAW_IMMD:
        /* Number of dwords is vtx_size * (num_vertices - 1).
         * PRIM_WALK must be equal to 3: vertex data is embedded
         * in the cmd stream */
        if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
            DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
            return -EINVAL;
        }
        track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
        track->immd_dwords = pkt->count - 1;
        r = r100_cs_track_check(p->rdev, track);
        if (r) {
            return r;
        }
        break;
    case PACKET3_3D_DRAW_IMMD_2:
        /* Number of dwords is vtx_size * (num_vertices - 1).
         * PRIM_WALK must be equal to 3: vertex data is embedded
         * in the cmd stream */
        if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
            DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
            return -EINVAL;
        }
        track->vap_vf_cntl = radeon_get_ib_value(p, idx);
        track->immd_dwords = pkt->count;
        r = r100_cs_track_check(p->rdev, track);
        if (r) {
            return r;
        }
        break;
    case PACKET3_3D_DRAW_VBUF:
        track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
        r = r100_cs_track_check(p->rdev, track);
        if (r) {
            return r;
        }
        break;
    case PACKET3_3D_DRAW_VBUF_2:
        track->vap_vf_cntl = radeon_get_ib_value(p, idx);
        r = r100_cs_track_check(p->rdev, track);
        if (r) {
            return r;
        }
        break;
    case PACKET3_3D_DRAW_INDX:
        track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
        r = r100_cs_track_check(p->rdev, track);
        if (r) {
            return r;
        }
        break;
    case PACKET3_3D_DRAW_INDX_2:
        track->vap_vf_cntl = radeon_get_ib_value(p, idx);
        r = r100_cs_track_check(p->rdev, track);
        if (r) {
            return r;
        }
        break;
    case PACKET3_3D_CLEAR_HIZ:
    case PACKET3_3D_CLEAR_ZMASK:
        if (p->rdev->hyperz_filp != p->filp)
            return -EINVAL;
        break;
    case PACKET3_3D_CLEAR_CMASK:
        if (p->rdev->cmask_filp != p->filp)
            return -EINVAL;
        break;
    case PACKET3_NOP:
        break;
    default:
        DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
        return -EINVAL;
    }
    return 0;
}
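
/*
 * On the immd_dwords accounting above: PACKET3_3D_DRAW_IMMD appears to lead
 * with two register dwords (VAP_VTX_FMT and VAP_VF_CNTL) before the embedded
 * vertex data, hence pkt->count - 1, while PACKET3_3D_DRAW_IMMD_2 leads with
 * only VAP_VF_CNTL, hence pkt->count. This reading is inferred from the math
 * here together with r100_cs_track_check(); it is not documented in this
 * file.
 */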

int r300_cs_parse(struct radeon_cs_parser *p)
{
    struct radeon_cs_packet pkt;
    struct r100_cs_track *track;
    int r;

    track = malloc(sizeof(*track), DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
    if (track == NULL)
        return -ENOMEM;
    r100_cs_track_clear(p->rdev, track);
    p->track = track;
    do {
        r = r100_cs_packet_parse(p, &pkt, p->idx);
        if (r) {
            free(p->track, DRM_MEM_DRIVER);
            p->track = NULL;
            return r;
        }
        p->idx += pkt.count + 2;
        switch (pkt.type) {
        case PACKET_TYPE0:
            r = r100_cs_parse_packet0(p, &pkt,
                                      p->rdev->config.r300.reg_safe_bm,
                                      p->rdev->config.r300.reg_safe_bm_size,
                                      &r300_packet0_check);
            break;
        case PACKET_TYPE2:
            break;
        case PACKET_TYPE3:
            r = r300_packet3_check(p, &pkt);
            break;
        default:
            DRM_ERROR("Unknown packet type %d !\n", pkt.type);
            free(p->track, DRM_MEM_DRIVER);
            p->track = NULL;
            return -EINVAL;
        }
        if (r) {
            free(p->track, DRM_MEM_DRIVER);
            p->track = NULL;
            return r;
        }
    } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
    free(p->track, DRM_MEM_DRIVER);
    p->track = NULL;
    return 0;
}

void r300_set_reg_safe(struct radeon_device *rdev)
{
    rdev->config.r300.reg_safe_bm = r300_reg_safe_bm;
    rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r300_reg_safe_bm);
}

void r300_mc_program(struct radeon_device *rdev)
{
    struct r100_mc_save save;
    int r;

    r = r100_debugfs_mc_info_init(rdev);
    if (r) {
        dev_err(rdev->dev, "Failed to create r100_mc debugfs file.\n");
    }

    /* Stop all mc clients */
    r100_mc_stop(rdev, &save);
    if (rdev->flags & RADEON_IS_AGP) {
        WREG32(R_00014C_MC_AGP_LOCATION,
               S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) |
               S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
        WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
        WREG32(R_00015C_AGP_BASE_2,
               upper_32_bits(rdev->mc.agp_base) & 0xff);
    } else {
        WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF);
        WREG32(R_000170_AGP_BASE, 0);
        WREG32(R_00015C_AGP_BASE_2, 0);
    }
    /* Wait for mc idle */
    if (r300_mc_wait_for_idle(rdev))
        DRM_INFO("Failed to wait MC idle before programming MC.\n");
    /* Program MC; should be a 32-bit limited address space */
    WREG32(R_000148_MC_FB_LOCATION,
           S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
           S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
    r100_mc_resume(rdev, &save);
}
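
/*
 * Illustrative example (hypothetical values): MC_FB_LOCATION packs the VRAM
 * range as 64KB-granular start/top fields, so with vram_start = 0 and
 * vram_end = 0x07FFFFFF (a 128MB aperture) the write above programs
 * S_000148_MC_FB_START(0x0000) | S_000148_MC_FB_TOP(0x07FF).
 */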

void r300_clock_startup(struct radeon_device *rdev)
{
    u32 tmp;

    if (radeon_dynclks != -1 && radeon_dynclks)
        radeon_legacy_set_clock_gating(rdev, 1);
    /* We need to force some of the blocks on */
    tmp = RREG32_PLL(R_00000D_SCLK_CNTL);
    tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
    if ((rdev->family == CHIP_RV350) || (rdev->family == CHIP_RV380))
        tmp |= S_00000D_FORCE_VAP(1);
    WREG32_PLL(R_00000D_SCLK_CNTL, tmp);
}

static int r300_startup(struct radeon_device *rdev)
{
    int r;

    /* set common regs */
    r100_set_common_regs(rdev);
    /* program mc */
    r300_mc_program(rdev);
    /* Resume clock */
    r300_clock_startup(rdev);
    /* Initialize GPU configuration (# pipes, ...) */
    r300_gpu_init(rdev);
    /* Initialize GART (initialize after TTM so we can allocate
     * memory through TTM but finalize after TTM) */
    if (rdev->flags & RADEON_IS_PCIE) {
        r = rv370_pcie_gart_enable(rdev);
        if (r)
            return r;
    }

    if (rdev->family == CHIP_R300 ||
        rdev->family == CHIP_R350 ||
        rdev->family == CHIP_RV350)
        r100_enable_bm(rdev);

    if (rdev->flags & RADEON_IS_PCI) {
        r = r100_pci_gart_enable(rdev);
        if (r)
            return r;
    }

    /* allocate wb buffer */
    r = radeon_wb_init(rdev);
    if (r)
        return r;

    r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
    if (r) {
        dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
        return r;
    }

    /* Enable IRQ */
    r100_irq_set(rdev);
    rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
    /* 1M ring buffer */
    r = r100_cp_init(rdev, 1024 * 1024);
    if (r) {
        dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
        return r;
    }

    r = radeon_ib_pool_init(rdev);
    if (r) {
        dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
        return r;
    }

    return 0;
}

int r300_resume(struct radeon_device *rdev)
{
    int r;

    /* Make sure the GARTs are disabled */
    if (rdev->flags & RADEON_IS_PCIE)
        rv370_pcie_gart_disable(rdev);
    if (rdev->flags & RADEON_IS_PCI)
        r100_pci_gart_disable(rdev);
    /* Resume clock before doing reset */
    r300_clock_startup(rdev);
    /* Reset gpu before posting otherwise ATOM will enter infinite loop */
    if (radeon_asic_reset(rdev)) {
        dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                 RREG32(R_000E40_RBBM_STATUS),
                 RREG32(R_0007C0_CP_STAT));
    }
    /* post */
    radeon_combios_asic_init(rdev->ddev);
    /* Resume clock after posting */
    r300_clock_startup(rdev);
    /* Initialize surface registers */
    radeon_surface_init(rdev);

    rdev->accel_working = true;
    r = r300_startup(rdev);
    if (r) {
        rdev->accel_working = false;
    }
    return r;
}

int r300_suspend(struct radeon_device *rdev)
{
    r100_cp_disable(rdev);
    radeon_wb_disable(rdev);
    r100_irq_disable(rdev);
    if (rdev->flags & RADEON_IS_PCIE)
        rv370_pcie_gart_disable(rdev);
    if (rdev->flags & RADEON_IS_PCI)
        r100_pci_gart_disable(rdev);
    return 0;
}

void r300_fini(struct radeon_device *rdev)
{
    r100_cp_fini(rdev);
    radeon_wb_fini(rdev);
    radeon_ib_pool_fini(rdev);
    radeon_gem_fini(rdev);
    if (rdev->flags & RADEON_IS_PCIE)
        rv370_pcie_gart_fini(rdev);
    if (rdev->flags & RADEON_IS_PCI)
        r100_pci_gart_fini(rdev);
    radeon_agp_fini(rdev);
    radeon_irq_kms_fini(rdev);
    radeon_fence_driver_fini(rdev);
    radeon_bo_fini(rdev);
    radeon_atombios_fini(rdev);
    free(rdev->bios, DRM_MEM_DRIVER);
    rdev->bios = NULL;
}

int r300_init(struct radeon_device *rdev)
{
    int r;

    /* Disable VGA */
    r100_vga_render_disable(rdev);
    /* Initialize scratch registers */
    radeon_scratch_init(rdev);
    /* Initialize surface registers */
    radeon_surface_init(rdev);
    /* TODO: disable VGA need to use VGA request */
    /* restore some register to sane defaults */
    r100_restore_sanity(rdev);
    /* BIOS */
    if (!radeon_get_bios(rdev)) {
        if (ASIC_IS_AVIVO(rdev))
            return -EINVAL;
    }
    if (rdev->is_atom_bios) {
        dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
        return -EINVAL;
    } else {
        r = radeon_combios_init(rdev);
        if (r)
            return r;
    }
    /* Reset gpu before posting otherwise ATOM will enter infinite loop */
    if (radeon_asic_reset(rdev)) {
        dev_warn(rdev->dev,
                 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                 RREG32(R_000E40_RBBM_STATUS),
                 RREG32(R_0007C0_CP_STAT));
    }
    /* check if cards are posted or not */
    if (radeon_boot_test_post_card(rdev) == false)
        return -EINVAL;
    /* Set asic errata */
    r300_errata(rdev);
    /* Initialize clocks */
    radeon_get_clock_info(rdev->ddev);
    /* initialize AGP */
    if (rdev->flags & RADEON_IS_AGP) {
        r = radeon_agp_init(rdev);
        if (r) {
            radeon_agp_disable(rdev);
        }
    }
    /* initialize memory controller */
    r300_mc_init(rdev);
    /* Fence driver */
    r = radeon_fence_driver_init(rdev);
    if (r)
        return r;
    r = radeon_irq_kms_init(rdev);
    if (r)
        return r;
    /* Memory manager */
    r = radeon_bo_init(rdev);
    if (r)
        return r;
    if (rdev->flags & RADEON_IS_PCIE) {
        r = rv370_pcie_gart_init(rdev);
        if (r)
            return r;
    }
    if (rdev->flags & RADEON_IS_PCI) {
        r = r100_pci_gart_init(rdev);
        if (r)
            return r;
    }
    r300_set_reg_safe(rdev);

    rdev->accel_working = true;
    r = r300_startup(rdev);
    if (r) {
        /* Something went wrong with the accel init; stop accel */
        dev_err(rdev->dev, "Disabling GPU acceleration\n");
        r100_cp_fini(rdev);
        radeon_wb_fini(rdev);
        radeon_ib_pool_fini(rdev);
        radeon_irq_kms_fini(rdev);
        if (rdev->flags & RADEON_IS_PCIE)
            rv370_pcie_gart_fini(rdev);
        if (rdev->flags & RADEON_IS_PCI)
            r100_pci_gart_fini(rdev);
        radeon_agp_fini(rdev);
        rdev->accel_working = false;
    }
    return 0;
}