/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/drm2/radeon/rv515.c 254885 2013-08-25 19:37:15Z dumbbell $");

#include <dev/drm2/drmP.h>
#include "rv515d.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "atom.h"
#include "rv515_reg_safe.h"

/* This file gathers functions specific to: rv515 */
static int rv515_debugfs_pipes_info_init(struct radeon_device *rdev);
static int rv515_debugfs_ga_info_init(struct radeon_device *rdev);
static void rv515_gpu_init(struct radeon_device *rdev);
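/*
 * Offset added to a D1 display register address to reach the same register
 * on the second display controller (D2); index 0 is CRTC0, index 1 is CRTC1.
 */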
static const u32 crtc_offsets[2] =
{
	0,
	AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
};

void rv515_debugfs(struct radeon_device *rdev)
{
	if (r100_debugfs_rbbm_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for RBBM !\n");
	}
	if (rv515_debugfs_pipes_info_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for pipes !\n");
	}
	if (rv515_debugfs_ga_info_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for GA !\n");
	}
}
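/*
 * Emit the initial 3D engine state to the GFX ring when the CP is started:
 * engine idle synchronisation, pipe configuration, MSAA sample positions,
 * and destination/Z cache flushes.
 */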
void rv515_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
{
	int r;

	r = radeon_ring_lock(rdev, ring, 64);
	if (r) {
		return;
	}
	radeon_ring_write(ring, PACKET0(ISYNC_CNTL, 0));
	radeon_ring_write(ring,
			  ISYNC_ANY2D_IDLE3D |
			  ISYNC_ANY3D_IDLE2D |
			  ISYNC_WAIT_IDLEGUI |
			  ISYNC_CPSCRATCH_IDLEGUI);
	radeon_ring_write(ring, PACKET0(WAIT_UNTIL, 0));
	radeon_ring_write(ring, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
	radeon_ring_write(ring, PACKET0(R300_DST_PIPE_CONFIG, 0));
	radeon_ring_write(ring, R300_PIPE_AUTO_CONFIG);
	radeon_ring_write(ring, PACKET0(GB_SELECT, 0));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, PACKET0(GB_ENABLE, 0));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, PACKET0(R500_SU_REG_DEST, 0));
	radeon_ring_write(ring, (1 << rdev->num_gb_pipes) - 1);
	radeon_ring_write(ring, PACKET0(VAP_INDEX_OFFSET, 0));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, RB3D_DC_FLUSH | RB3D_DC_FREE);
	radeon_ring_write(ring, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, ZC_FLUSH | ZC_FREE);
	radeon_ring_write(ring, PACKET0(WAIT_UNTIL, 0));
	radeon_ring_write(ring, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
	radeon_ring_write(ring, PACKET0(GB_AA_CONFIG, 0));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, RB3D_DC_FLUSH | RB3D_DC_FREE);
	radeon_ring_write(ring, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, ZC_FLUSH | ZC_FREE);
	radeon_ring_write(ring, PACKET0(GB_MSPOS0, 0));
	radeon_ring_write(ring,
			  ((6 << MS_X0_SHIFT) |
			   (6 << MS_Y0_SHIFT) |
			   (6 << MS_X1_SHIFT) |
			   (6 << MS_Y1_SHIFT) |
			   (6 << MS_X2_SHIFT) |
			   (6 << MS_Y2_SHIFT) |
			   (6 << MSBD0_Y_SHIFT) |
			   (6 << MSBD0_X_SHIFT)));
	radeon_ring_write(ring, PACKET0(GB_MSPOS1, 0));
	radeon_ring_write(ring,
			  ((6 << MS_X3_SHIFT) |
			   (6 << MS_Y3_SHIFT) |
			   (6 << MS_X4_SHIFT) |
			   (6 << MS_Y4_SHIFT) |
			   (6 << MS_X5_SHIFT) |
			   (6 << MS_Y5_SHIFT) |
			   (6 << MSBD1_SHIFT)));
	radeon_ring_write(ring, PACKET0(GA_ENHANCE, 0));
	radeon_ring_write(ring, GA_DEADLOCK_CNTL | GA_FASTSYNC_CNTL);
	radeon_ring_write(ring, PACKET0(GA_POLY_MODE, 0));
	radeon_ring_write(ring, FRONT_PTYPE_TRIANGE | BACK_PTYPE_TRIANGE);
	radeon_ring_write(ring, PACKET0(GA_ROUND_MODE, 0));
	radeon_ring_write(ring, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST);
	radeon_ring_write(ring, PACKET0(0x20C8, 0));
	radeon_ring_write(ring, 0);
	radeon_ring_unlock_commit(rdev, ring);
}

int rv515_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	uint32_t tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32_MC(MC_STATUS);
		if (tmp & MC_STATUS_IDLE) {
			return 0;
		}
		DRM_UDELAY(1);
	}
	return -1;
}

void rv515_vga_render_disable(struct radeon_device *rdev)
{
	WREG32(R_000300_VGA_RENDER_CONTROL,
	       RREG32(R_000300_VGA_RENDER_CONTROL) & C_000300_VGA_VSTATUS_CNTL);
}

static void rv515_gpu_init(struct radeon_device *rdev)
{
	unsigned pipe_select_current, gb_pipe_select, tmp;

	if (r100_gui_wait_for_idle(rdev)) {
		DRM_ERROR("Failed to wait GUI idle while "
			  "resetting GPU. Bad things might happen.\n");
	}
	rv515_vga_render_disable(rdev);
	r420_pipes_init(rdev);
	gb_pipe_select = RREG32(R400_GB_PIPE_SELECT);
	tmp = RREG32(R300_DST_PIPE_CONFIG);
	pipe_select_current = (tmp >> 2) & 3;
	tmp = (1 << pipe_select_current) |
	      (((gb_pipe_select >> 8) & 0xF) << 4);
	WREG32_PLL(0x000D, tmp);
	if (r100_gui_wait_for_idle(rdev)) {
		DRM_ERROR("Failed to wait GUI idle while "
			  "resetting GPU. Bad things might happen.\n");
	}
	if (rv515_mc_wait_for_idle(rdev)) {
		DRM_ERROR("Failed to wait MC idle while "
			  "programming pipes. Bad things might happen.\n");
	}
}

static void rv515_vram_get_type(struct radeon_device *rdev)
{
	uint32_t tmp;

	rdev->mc.vram_width = 128;
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32_MC(RV515_MC_CNTL) & MEM_NUM_CHANNELS_MASK;
	switch (tmp) {
	case 0:
		rdev->mc.vram_width = 64;
		break;
	case 1:
		rdev->mc.vram_width = 128;
		break;
	default:
		rdev->mc.vram_width = 128;
		break;
	}
}

static void rv515_mc_init(struct radeon_device *rdev)
{

	rv515_vram_get_type(rdev);
	r100_vram_init_sizes(rdev);
	radeon_vram_location(rdev, &rdev->mc, 0);
	rdev->mc.gtt_base_align = 0;
	if (!(rdev->flags & RADEON_IS_AGP))
		radeon_gtt_location(rdev, &rdev->mc);
	radeon_update_bandwidth_info(rdev);
}
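/*
 * MC registers are reached through the MC_IND_INDEX/MC_IND_DATA pair rather
 * than the normal MMIO aperture: the register offset is written to
 * MC_IND_INDEX together with the access-enable bits (the extra high bit used
 * by the write path below is likely the indirect write enable), the value is
 * transferred through MC_IND_DATA, and the index register is cleared again.
 */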
uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
	uint32_t r;

	WREG32(MC_IND_INDEX, 0x7f0000 | (reg & 0xffff));
	r = RREG32(MC_IND_DATA);
	WREG32(MC_IND_INDEX, 0);
	return r;
}

void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	WREG32(MC_IND_INDEX, 0xff0000 | ((reg) & 0xffff));
	WREG32(MC_IND_DATA, (v));
	WREG32(MC_IND_INDEX, 0);
}

#if defined(CONFIG_DEBUG_FS)
static int rv515_debugfs_pipes_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t tmp;

	tmp = RREG32(GB_PIPE_SELECT);
	seq_printf(m, "GB_PIPE_SELECT 0x%08x\n", tmp);
	tmp = RREG32(SU_REG_DEST);
	seq_printf(m, "SU_REG_DEST 0x%08x\n", tmp);
	tmp = RREG32(GB_TILE_CONFIG);
	seq_printf(m, "GB_TILE_CONFIG 0x%08x\n", tmp);
	tmp = RREG32(DST_PIPE_CONFIG);
	seq_printf(m, "DST_PIPE_CONFIG 0x%08x\n", tmp);
	return 0;
}

static int rv515_debugfs_ga_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t tmp;

	tmp = RREG32(0x2140);
	seq_printf(m, "VAP_CNTL_STATUS 0x%08x\n", tmp);
	radeon_asic_reset(rdev);
	tmp = RREG32(0x425C);
	seq_printf(m, "GA_IDLE 0x%08x\n", tmp);
	return 0;
}

static struct drm_info_list rv515_pipes_info_list[] = {
	{"rv515_pipes_info", rv515_debugfs_pipes_info, 0, NULL},
};

static struct drm_info_list rv515_ga_info_list[] = {
	{"rv515_ga_info", rv515_debugfs_ga_info, 0, NULL},
};
#endif

static int rv515_debugfs_pipes_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, rv515_pipes_info_list, 1);
#else
	return 0;
#endif
}

static int rv515_debugfs_ga_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, rv515_ga_info_list, 1);
#else
	return 0;
#endif
}
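/*
 * rv515_mc_stop()/rv515_mc_resume() bracket MC reprogramming: the display
 * controllers are blanked (and the MC blacked out on R600 and newer parts)
 * so the framebuffer location can change without live clients fetching from
 * it, then the saved state is restored afterwards.
 */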
void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
{
	u32 crtc_enabled, tmp, frame_count, blackout;
	int i, j;

	save->vga_render_control = RREG32(R_000300_VGA_RENDER_CONTROL);
	save->vga_hdp_control = RREG32(R_000328_VGA_HDP_CONTROL);

	/* disable VGA render */
	WREG32(R_000300_VGA_RENDER_CONTROL, 0);
	/* blank the display controllers */
	for (i = 0; i < rdev->num_crtc; i++) {
		crtc_enabled = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]) & AVIVO_CRTC_EN;
		if (crtc_enabled) {
			save->crtc_enabled[i] = true;
			tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]);
			if (!(tmp & AVIVO_CRTC_DISP_READ_REQUEST_DISABLE)) {
				radeon_wait_for_vblank(rdev, i);
				tmp |= AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
				WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp);
			}
			/* wait for the next frame */
			frame_count = radeon_get_vblank_counter(rdev, i);
			for (j = 0; j < rdev->usec_timeout; j++) {
				if (radeon_get_vblank_counter(rdev, i) != frame_count)
					break;
				DRM_UDELAY(1);
			}
		} else {
			save->crtc_enabled[i] = false;
		}
	}

	radeon_mc_wait_for_idle(rdev);

	if (rdev->family >= CHIP_R600) {
		if (rdev->family >= CHIP_RV770)
			blackout = RREG32(R700_MC_CITF_CNTL);
		else
			blackout = RREG32(R600_CITF_CNTL);
		if ((blackout & R600_BLACKOUT_MASK) != R600_BLACKOUT_MASK) {
			/* Block CPU access */
			WREG32(R600_BIF_FB_EN, 0);
			/* blackout the MC */
			blackout |= R600_BLACKOUT_MASK;
			if (rdev->family >= CHIP_RV770)
				WREG32(R700_MC_CITF_CNTL, blackout);
			else
				WREG32(R600_CITF_CNTL, blackout);
		}
	}
	/* wait for the MC to settle */
	DRM_UDELAY(100);
}

void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
{
	u32 tmp, frame_count;
	int i, j;

	/* update crtc base addresses */
	for (i = 0; i < rdev->num_crtc; i++) {
		if (rdev->family >= CHIP_RV770) {
			if (i == 1) {
				WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH,
				       upper_32_bits(rdev->mc.vram_start));
				WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH,
				       upper_32_bits(rdev->mc.vram_start));
			} else {
				WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH,
				       upper_32_bits(rdev->mc.vram_start));
				WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH,
				       upper_32_bits(rdev->mc.vram_start));
			}
		}
		WREG32(R_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
		       (u32)rdev->mc.vram_start);
		WREG32(R_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
		       (u32)rdev->mc.vram_start);
	}
	WREG32(R_000310_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);

	if (rdev->family >= CHIP_R600) {
		/* unblackout the MC */
		if (rdev->family >= CHIP_RV770)
			tmp = RREG32(R700_MC_CITF_CNTL);
		else
			tmp = RREG32(R600_CITF_CNTL);
		tmp &= ~R600_BLACKOUT_MASK;
		if (rdev->family >= CHIP_RV770)
			WREG32(R700_MC_CITF_CNTL, tmp);
		else
			WREG32(R600_CITF_CNTL, tmp);
		/* allow CPU access */
		WREG32(R600_BIF_FB_EN, R600_FB_READ_EN | R600_FB_WRITE_EN);
	}

	for (i = 0; i < rdev->num_crtc; i++) {
		if (save->crtc_enabled[i]) {
			tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]);
			tmp &= ~AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
			WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp);
			/* wait for the next frame */
			frame_count = radeon_get_vblank_counter(rdev, i);
			for (j = 0; j < rdev->usec_timeout; j++) {
				if (radeon_get_vblank_counter(rdev, i) != frame_count)
					break;
				DRM_UDELAY(1);
			}
		}
	}
	/* Unlock vga access */
	WREG32(R_000328_VGA_HDP_CONTROL, save->vga_hdp_control);
	DRM_MDELAY(1);
	WREG32(R_000300_VGA_RENDER_CONTROL, save->vga_render_control);
}
static void rv515_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;

	/* Stops all mc clients */
	rv515_mc_stop(rdev, &save);

	/* Wait for mc idle */
	if (rv515_mc_wait_for_idle(rdev))
		dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
	/* Write VRAM size in case we are limiting it */
	WREG32(R_0000F8_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
	/* Program MC, should be a 32-bit limited address space */
	WREG32_MC(R_000001_MC_FB_LOCATION,
		  S_000001_MC_FB_START(rdev->mc.vram_start >> 16) |
		  S_000001_MC_FB_TOP(rdev->mc.vram_end >> 16));
	WREG32(R_000134_HDP_FB_LOCATION,
	       S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32_MC(R_000002_MC_AGP_LOCATION,
			  S_000002_MC_AGP_START(rdev->mc.gtt_start >> 16) |
			  S_000002_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
		WREG32_MC(R_000003_MC_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
		WREG32_MC(R_000004_MC_AGP_BASE_2,
			  S_000004_AGP_BASE_ADDR_2(upper_32_bits(rdev->mc.agp_base)));
	} else {
		WREG32_MC(R_000002_MC_AGP_LOCATION, 0xFFFFFFFF);
		WREG32_MC(R_000003_MC_AGP_BASE, 0);
		WREG32_MC(R_000004_MC_AGP_BASE_2, 0);
	}

	rv515_mc_resume(rdev, &save);
}

void rv515_clock_startup(struct radeon_device *rdev)
{
	if (radeon_dynclks != -1 && radeon_dynclks)
		radeon_atom_set_clock_gating(rdev, 1);
	/* We need to force on some of the blocks */
	WREG32_PLL(R_00000F_CP_DYN_CNTL,
		   RREG32_PLL(R_00000F_CP_DYN_CNTL) | S_00000F_CP_FORCEON(1));
	WREG32_PLL(R_000011_E2_DYN_CNTL,
		   RREG32_PLL(R_000011_E2_DYN_CNTL) | S_000011_E2_FORCEON(1));
	WREG32_PLL(R_000013_IDCT_DYN_CNTL,
		   RREG32_PLL(R_000013_IDCT_DYN_CNTL) | S_000013_IDCT_FORCEON(1));
}
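/*
 * Bring the ASIC up: reprogram the MC, restore clocks, configure the GPU,
 * enable the GART (PCIE only), then set up writeback, fences, interrupts,
 * the CP ring and the IB pool, in that order.
 */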
static int rv515_startup(struct radeon_device *rdev)
{
	int r;

	rv515_mc_program(rdev);
	/* Resume clock */
	rv515_clock_startup(rdev);
	/* Initialize GPU configuration (# pipes, ...) */
	rv515_gpu_init(rdev);
	/* Initialize GART (initialize after TTM so we can allocate
	 * memory through TTM but finalize after TTM) */
	if (rdev->flags & RADEON_IS_PCIE) {
		r = rv370_pcie_gart_enable(rdev);
		if (r)
			return r;
	}

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	/* Enable IRQ */
	rs600_irq_set(rdev);
	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
	/* 1M ring buffer */
	r = r100_cp_init(rdev, 1024 * 1024);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
		return r;
	}

	r = radeon_ib_pool_init(rdev);
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		return r;
	}

	return 0;
}

int rv515_resume(struct radeon_device *rdev)
{
	int r;

	/* Make sure GART is not working */
	if (rdev->flags & RADEON_IS_PCIE)
		rv370_pcie_gart_disable(rdev);
	/* Resume clock before doing reset */
	rv515_clock_startup(rdev);
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			RREG32(R_000E40_RBBM_STATUS),
			RREG32(R_0007C0_CP_STAT));
	}
	/* post */
	atom_asic_init(rdev->mode_info.atom_context);
	/* Resume clock after posting */
	rv515_clock_startup(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);

	rdev->accel_working = true;
	r = rv515_startup(rdev);
	if (r) {
		rdev->accel_working = false;
	}
	return r;
}

int rv515_suspend(struct radeon_device *rdev)
{
	r100_cp_disable(rdev);
	radeon_wb_disable(rdev);
	rs600_irq_disable(rdev);
	if (rdev->flags & RADEON_IS_PCIE)
		rv370_pcie_gart_disable(rdev);
	return 0;
}

void rv515_set_safe_registers(struct radeon_device *rdev)
{
	rdev->config.r300.reg_safe_bm = rv515_reg_safe_bm;
	rdev->config.r300.reg_safe_bm_size = DRM_ARRAY_SIZE(rv515_reg_safe_bm);
}

void rv515_fini(struct radeon_device *rdev)
{
	r100_cp_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_gem_fini(rdev);
	rv370_pcie_gart_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_irq_kms_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	free(rdev->bios, DRM_MEM_DRIVER);
	rdev->bios = NULL;
}
int rv515_init(struct radeon_device *rdev)
{
	int r;

	/* Initialize scratch registers */
	radeon_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* TODO: disable VGA need to use VGA request */
	/* restore some register to sane defaults */
	r100_restore_sanity(rdev);
	/* BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	if (rdev->is_atom_bios) {
		r = radeon_atombios_init(rdev);
		if (r)
			return r;
	} else {
		dev_err(rdev->dev, "Expecting atombios for RV515 GPU\n");
		return -EINVAL;
	}
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev,
			"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			RREG32(R_000E40_RBBM_STATUS),
			RREG32(R_0007C0_CP_STAT));
	}
	/* check if cards are posted or not */
	if (radeon_boot_test_post_card(rdev) == false)
		return -EINVAL;
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* initialize AGP */
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r) {
			radeon_agp_disable(rdev);
		}
	}
	/* initialize memory controller */
	rv515_mc_init(rdev);
	rv515_debugfs(rdev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;
	r = rv370_pcie_gart_init(rdev);
	if (r)
		return r;
	rv515_set_safe_registers(rdev);

	rdev->accel_working = true;
	r = rv515_startup(rdev);
	if (r) {
		/* Something went wrong with the accel init, so stop accel */
		dev_err(rdev->dev, "Disabling GPU acceleration\n");
		r100_cp_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_ib_pool_fini(rdev);
		radeon_irq_kms_fini(rdev);
		rv370_pcie_gart_fini(rdev);
		radeon_agp_fini(rdev);
		rdev->accel_working = false;
	}
	return 0;
}
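/*
 * atom_rv515_force_tv_scaler() loads a fixed table of values into the AVIVO
 * display scaler through the per-CRTC index/data register pair at
 * 0x6578/0x657c; judging by the surrounding register writes this appears to
 * be a filter-coefficient table used when the TV scaler is forced on.
 */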
void atom_rv515_force_tv_scaler(struct radeon_device *rdev, struct radeon_crtc *crtc)
{
	int index_reg = 0x6578 + crtc->crtc_offset;
	int data_reg = 0x657c + crtc->crtc_offset;

	WREG32(0x659C + crtc->crtc_offset, 0x0);
	WREG32(0x6594 + crtc->crtc_offset, 0x705);
	WREG32(0x65A4 + crtc->crtc_offset, 0x10001);
	WREG32(0x65D8 + crtc->crtc_offset, 0x0);
	WREG32(0x65B0 + crtc->crtc_offset, 0x0);
	WREG32(0x65C0 + crtc->crtc_offset, 0x0);
	WREG32(0x65D4 + crtc->crtc_offset, 0x0);
	WREG32(index_reg, 0x0);
	WREG32(data_reg, 0x841880A8);
	WREG32(index_reg, 0x1);
	WREG32(data_reg, 0x84208680);
	WREG32(index_reg, 0x2);
	WREG32(data_reg, 0xBFF880B0);
	WREG32(index_reg, 0x100);
	WREG32(data_reg, 0x83D88088);
	WREG32(index_reg, 0x101);
	WREG32(data_reg, 0x84608680);
	WREG32(index_reg, 0x102);
	WREG32(data_reg, 0xBFF080D0);
	WREG32(index_reg, 0x200);
	WREG32(data_reg, 0x83988068);
	WREG32(index_reg, 0x201);
	WREG32(data_reg, 0x84A08680);
	WREG32(index_reg, 0x202);
	WREG32(data_reg, 0xBFF080F8);
	WREG32(index_reg, 0x300);
	WREG32(data_reg, 0x83588058);
	WREG32(index_reg, 0x301);
	WREG32(data_reg, 0x84E08660);
	WREG32(index_reg, 0x302);
	WREG32(data_reg, 0xBFF88120);
	WREG32(index_reg, 0x400);
	WREG32(data_reg, 0x83188040);
	WREG32(index_reg, 0x401);
	WREG32(data_reg, 0x85008660);
	WREG32(index_reg, 0x402);
	WREG32(data_reg, 0xBFF88150);
	WREG32(index_reg, 0x500);
	WREG32(data_reg, 0x82D88030);
	WREG32(index_reg, 0x501);
	WREG32(data_reg, 0x85408640);
	WREG32(index_reg, 0x502);
	WREG32(data_reg, 0xBFF88180);
	WREG32(index_reg, 0x600);
	WREG32(data_reg, 0x82A08018);
	WREG32(index_reg, 0x601);
	WREG32(data_reg, 0x85808620);
	WREG32(index_reg, 0x602);
	WREG32(data_reg, 0xBFF081B8);
	WREG32(index_reg, 0x700);
	WREG32(data_reg, 0x82608010);
	WREG32(index_reg, 0x701);
	WREG32(data_reg, 0x85A08600);
	WREG32(index_reg, 0x702);
	WREG32(data_reg, 0x800081F0);
	WREG32(index_reg, 0x800);
	WREG32(data_reg, 0x8228BFF8);
	WREG32(index_reg, 0x801);
	WREG32(data_reg, 0x85E085E0);
	WREG32(index_reg, 0x802);
	WREG32(data_reg, 0xBFF88228);
	WREG32(index_reg, 0x10000);
	WREG32(data_reg, 0x82A8BF00);
	WREG32(index_reg, 0x10001);
	WREG32(data_reg, 0x82A08CC0);
	WREG32(index_reg, 0x10002);
	WREG32(data_reg, 0x8008BEF8);
	WREG32(index_reg, 0x10100);
	WREG32(data_reg, 0x81F0BF28);
	WREG32(index_reg, 0x10101);
	WREG32(data_reg, 0x83608CA0);
	WREG32(index_reg, 0x10102);
	WREG32(data_reg, 0x8018BED0);
	WREG32(index_reg, 0x10200);
	WREG32(data_reg, 0x8148BF38);
	WREG32(index_reg, 0x10201);
	WREG32(data_reg, 0x84408C80);
	WREG32(index_reg, 0x10202);
	WREG32(data_reg, 0x8008BEB8);
	WREG32(index_reg, 0x10300);
	WREG32(data_reg, 0x80B0BF78);
	WREG32(index_reg, 0x10301);
	WREG32(data_reg, 0x85008C20);
	WREG32(index_reg, 0x10302);
	WREG32(data_reg, 0x8020BEA0);
	WREG32(index_reg, 0x10400);
	WREG32(data_reg, 0x8028BF90);
	WREG32(index_reg, 0x10401);
	WREG32(data_reg, 0x85E08BC0);
	WREG32(index_reg, 0x10402);
	WREG32(data_reg, 0x8018BE90);
	WREG32(index_reg, 0x10500);
	WREG32(data_reg, 0xBFB8BFB0);
	WREG32(index_reg, 0x10501);
	WREG32(data_reg, 0x86C08B40);
	WREG32(index_reg, 0x10502);
	WREG32(data_reg, 0x8010BE90);
	WREG32(index_reg, 0x10600);
	WREG32(data_reg, 0xBF58BFC8);
	WREG32(index_reg, 0x10601);
	WREG32(data_reg, 0x87A08AA0);
	WREG32(index_reg, 0x10602);
	WREG32(data_reg, 0x8010BE98);
	WREG32(index_reg, 0x10700);
	WREG32(data_reg, 0xBF10BFF0);
	WREG32(index_reg, 0x10701);
	WREG32(data_reg, 0x886089E0);
	WREG32(index_reg, 0x10702);
	WREG32(data_reg, 0x8018BEB0);
	WREG32(index_reg, 0x10800);
	WREG32(data_reg, 0xBED8BFE8);
	WREG32(index_reg, 0x10801);
	WREG32(data_reg, 0x89408940);
	WREG32(index_reg, 0x10802);
	WREG32(data_reg, 0xBFE8BED8);
	WREG32(index_reg, 0x20000);
	WREG32(data_reg, 0x80008000);
	WREG32(index_reg, 0x20001);
	WREG32(data_reg, 0x90008000);
	WREG32(index_reg, 0x20002);
	WREG32(data_reg, 0x80008000);
	WREG32(index_reg, 0x20003);
	WREG32(data_reg, 0x80008000);
	WREG32(index_reg, 0x20100);
	WREG32(data_reg, 0x80108000);
	WREG32(index_reg, 0x20101);
	WREG32(data_reg, 0x8FE0BF70);
	WREG32(index_reg, 0x20102);
	WREG32(data_reg, 0xBFE880C0);
	WREG32(index_reg, 0x20103);
	WREG32(data_reg, 0x80008000);
	WREG32(index_reg, 0x20200);
	WREG32(data_reg, 0x8018BFF8);
	WREG32(index_reg, 0x20201);
	WREG32(data_reg, 0x8F80BF08);
	WREG32(index_reg, 0x20202);
	WREG32(data_reg, 0xBFD081A0);
	WREG32(index_reg, 0x20203);
	WREG32(data_reg, 0xBFF88000);
	WREG32(index_reg, 0x20300);
	WREG32(data_reg, 0x80188000);
	WREG32(index_reg, 0x20301);
	WREG32(data_reg, 0x8EE0BEC0);
	WREG32(index_reg, 0x20302);
	WREG32(data_reg, 0xBFB082A0);
	WREG32(index_reg, 0x20303);
	WREG32(data_reg, 0x80008000);
	WREG32(index_reg, 0x20400);
	WREG32(data_reg, 0x80188000);
	WREG32(index_reg, 0x20401);
	WREG32(data_reg, 0x8E00BEA0);
	WREG32(index_reg, 0x20402);
	WREG32(data_reg, 0xBF8883C0);
	WREG32(index_reg, 0x20403);
	WREG32(data_reg, 0x80008000);
	WREG32(index_reg, 0x20500);
	WREG32(data_reg, 0x80188000);
	WREG32(index_reg, 0x20501);
	WREG32(data_reg, 0x8D00BE90);
	WREG32(index_reg, 0x20502);
	WREG32(data_reg, 0xBF588500);
	WREG32(index_reg, 0x20503);
	WREG32(data_reg, 0x80008008);
	WREG32(index_reg, 0x20600);
	WREG32(data_reg, 0x80188000);
	WREG32(index_reg, 0x20601);
	WREG32(data_reg, 0x8BC0BE98);
	WREG32(index_reg, 0x20602);
	WREG32(data_reg, 0xBF308660);
	WREG32(index_reg, 0x20603);
	WREG32(data_reg, 0x80008008);
	WREG32(index_reg, 0x20700);
	WREG32(data_reg, 0x80108000);
	WREG32(index_reg, 0x20701);
	WREG32(data_reg, 0x8A80BEB0);
	WREG32(index_reg, 0x20702);
	WREG32(data_reg, 0xBF0087C0);
	WREG32(index_reg, 0x20703);
	WREG32(data_reg, 0x80008008);
	WREG32(index_reg, 0x20800);
	WREG32(data_reg, 0x80108000);
	WREG32(index_reg, 0x20801);
	WREG32(data_reg, 0x8920BED0);
	WREG32(index_reg, 0x20802);
	WREG32(data_reg, 0xBED08920);
	WREG32(index_reg, 0x20803);
	WREG32(data_reg, 0x80008010);
	WREG32(index_reg, 0x30000);
	WREG32(data_reg, 0x90008000);
	WREG32(index_reg, 0x30001);
	WREG32(data_reg, 0x80008000);
	WREG32(index_reg, 0x30100);
	WREG32(data_reg, 0x8FE0BF90);
	WREG32(index_reg, 0x30101);
	WREG32(data_reg, 0xBFF880A0);
	WREG32(index_reg, 0x30200);
	WREG32(data_reg, 0x8F60BF40);
	WREG32(index_reg, 0x30201);
	WREG32(data_reg, 0xBFE88180);
	WREG32(index_reg, 0x30300);
	WREG32(data_reg, 0x8EC0BF00);
	WREG32(index_reg, 0x30301);
	WREG32(data_reg, 0xBFC88280);
	WREG32(index_reg, 0x30400);
	WREG32(data_reg, 0x8DE0BEE0);
	WREG32(index_reg, 0x30401);
	WREG32(data_reg, 0xBFA083A0);
	WREG32(index_reg, 0x30500);
	WREG32(data_reg, 0x8CE0BED0);
	WREG32(index_reg, 0x30501);
	WREG32(data_reg, 0xBF7884E0);
	WREG32(index_reg, 0x30600);
	WREG32(data_reg, 0x8BA0BED8);
	WREG32(index_reg, 0x30601);
	WREG32(data_reg, 0xBF508640);
	WREG32(index_reg, 0x30700);
	WREG32(data_reg, 0x8A60BEE8);
	WREG32(index_reg, 0x30701);
	WREG32(data_reg, 0xBF2087A0);
	WREG32(index_reg, 0x30800);
	WREG32(data_reg, 0x8900BF00);
	WREG32(index_reg, 0x30801);
	WREG32(data_reg, 0xBF008900);
}
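/*
 * Per-CRTC watermark state used by the display bandwidth code below.  The
 * fixed20_12 members are 20.12 fixed-point values manipulated with the
 * dfixed_*() helpers (dfixed_const(), dfixed_mul(), dfixed_div(), ...).
 */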
struct rv515_watermark {
	u32          lb_request_fifo_depth;
	fixed20_12   num_line_pair;
	fixed20_12   estimated_width;
	fixed20_12   worst_case_latency;
	fixed20_12   consumption_rate;
	fixed20_12   active_time;
	fixed20_12   dbpp;
	fixed20_12   priority_mark_max;
	fixed20_12   priority_mark;
	fixed20_12   sclk;
};
static void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
					 struct radeon_crtc *crtc,
					 struct rv515_watermark *wm)
{
	struct drm_display_mode *mode = &crtc->base.mode;
	fixed20_12 a, b, c;
	fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width;
	fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency;

	if (!crtc->base.enabled) {
		/* FIXME: wouldn't it be better to set priority mark to maximum */
		wm->lb_request_fifo_depth = 4;
		return;
	}

	if (crtc->vsc.full > dfixed_const(2))
		wm->num_line_pair.full = dfixed_const(2);
	else
		wm->num_line_pair.full = dfixed_const(1);

	b.full = dfixed_const(mode->crtc_hdisplay);
	c.full = dfixed_const(256);
	a.full = dfixed_div(b, c);
	request_fifo_depth.full = dfixed_mul(a, wm->num_line_pair);
	request_fifo_depth.full = dfixed_ceil(request_fifo_depth);
	if (a.full < dfixed_const(4)) {
		wm->lb_request_fifo_depth = 4;
	} else {
		wm->lb_request_fifo_depth = dfixed_trunc(request_fifo_depth);
	}

	/* Determine consumption rate
	 *  pclk = pixel clock period(ns) = 1000 / (mode.clock / 1000)
	 *  vtaps = number of vertical taps,
	 *  vsc = vertical scaling ratio, defined as source/destination
	 *  hsc = horizontal scaling ratio, defined as source/destination
	 */
	a.full = dfixed_const(mode->clock);
	b.full = dfixed_const(1000);
	a.full = dfixed_div(a, b);
	pclk.full = dfixed_div(b, a);
	if (crtc->rmx_type != RMX_OFF) {
		b.full = dfixed_const(2);
		if (crtc->vsc.full > b.full)
			b.full = crtc->vsc.full;
		b.full = dfixed_mul(b, crtc->hsc);
		c.full = dfixed_const(2);
		b.full = dfixed_div(b, c);
		consumption_time.full = dfixed_div(pclk, b);
	} else {
		consumption_time.full = pclk.full;
	}
	a.full = dfixed_const(1);
	wm->consumption_rate.full = dfixed_div(a, consumption_time);


	/* Determine line time
	 *  LineTime = total time for one line of display
	 *  htotal = total number of horizontal pixels
	 *  pclk = pixel clock period(ns)
	 */
	a.full = dfixed_const(crtc->base.mode.crtc_htotal);
	line_time.full = dfixed_mul(a, pclk);

	/* Determine active time
	 *  ActiveTime = time of active region of display within one line,
	 *  hactive = total number of horizontal active pixels
	 *  htotal = total number of horizontal pixels
	 */
	a.full = dfixed_const(crtc->base.mode.crtc_htotal);
	b.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
	wm->active_time.full = dfixed_mul(line_time, b);
	wm->active_time.full = dfixed_div(wm->active_time, a);

	/* Determine chunk time
	 * ChunkTime = the time it takes the DCP to send one chunk of data
	 * to the LB which consists of pipeline delay and inter chunk gap
	 * sclk = system clock(Mhz)
	 */
	a.full = dfixed_const(600 * 1000);
	chunk_time.full = dfixed_div(a, rdev->pm.sclk);
	read_delay_latency.full = dfixed_const(1000);

	/* Determine the worst case latency
	 * NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines)
	 * WorstCaseLatency = worst case time from urgent to when the MC starts
	 *                    to return data
	 * READ_DELAY_IDLE_MAX = constant of 1us
	 * ChunkTime = time it takes the DCP to send one chunk of data to the LB
	 *             which consists of pipeline delay and inter chunk gap
	 */
	if (dfixed_trunc(wm->num_line_pair) > 1) {
		a.full = dfixed_const(3);
		wm->worst_case_latency.full = dfixed_mul(a, chunk_time);
		wm->worst_case_latency.full += read_delay_latency.full;
	} else {
		wm->worst_case_latency.full = chunk_time.full + read_delay_latency.full;
	}

	/* Determine the tolerable latency
	 * TolerableLatency = Any given request has only 1 line time
	 *                    for the data to be returned
	 * LBRequestFifoDepth = Number of chunk requests the LB can
	 *                      put into the request FIFO for a display
	 * LineTime = total time for one line of display
	 * ChunkTime = the time it takes the DCP to send one chunk
	 *             of data to the LB which consists of
	 *             pipeline delay and inter chunk gap
	 */
	if ((2+wm->lb_request_fifo_depth) >= dfixed_trunc(request_fifo_depth)) {
		tolerable_latency.full = line_time.full;
	} else {
		tolerable_latency.full = dfixed_const(wm->lb_request_fifo_depth - 2);
		tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full;
		tolerable_latency.full = dfixed_mul(tolerable_latency, chunk_time);
		tolerable_latency.full = line_time.full - tolerable_latency.full;
	}
	/* We assume worst case 32bits (4 bytes) */
	wm->dbpp.full = dfixed_const(2 * 16);

	/* Determine the maximum priority mark
	 *  width = viewport width in pixels
	 */
	a.full = dfixed_const(16);
	wm->priority_mark_max.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
	wm->priority_mark_max.full = dfixed_div(wm->priority_mark_max, a);
	wm->priority_mark_max.full = dfixed_ceil(wm->priority_mark_max);

	/* Determine estimated width */
	estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
	estimated_width.full = dfixed_div(estimated_width, consumption_time);
	if (dfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
		wm->priority_mark.full = wm->priority_mark_max.full;
	} else {
		a.full = dfixed_const(16);
		wm->priority_mark.full = dfixed_div(estimated_width, a);
		wm->priority_mark.full = dfixed_ceil(wm->priority_mark);
		wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
	}
}
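/*
 * Combine the per-CRTC watermarks into the priority marks written to
 * D1MODE/D2MODE_PRIORITY_A/B_CNT and the line-buffer request FIFO depths in
 * LB_MAX_REQ_OUTSTANDING.  When both CRTCs are active the available fill
 * rate is shared between them; with disp_priority == 2 the display requests
 * are forced to always-on priority.
 */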
void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
{
	struct drm_display_mode *mode0 = NULL;
	struct drm_display_mode *mode1 = NULL;
	struct rv515_watermark wm0;
	struct rv515_watermark wm1;
	u32 tmp;
	u32 d1mode_priority_a_cnt = MODE_PRIORITY_OFF;
	u32 d2mode_priority_a_cnt = MODE_PRIORITY_OFF;
	fixed20_12 priority_mark02, priority_mark12, fill_rate;
	fixed20_12 a, b;

	if (rdev->mode_info.crtcs[0]->base.enabled)
		mode0 = &rdev->mode_info.crtcs[0]->base.mode;
	if (rdev->mode_info.crtcs[1]->base.enabled)
		mode1 = &rdev->mode_info.crtcs[1]->base.mode;
	rs690_line_buffer_adjust(rdev, mode0, mode1);

	rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0);
	rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1);

	tmp = wm0.lb_request_fifo_depth;
	tmp |= wm1.lb_request_fifo_depth << 16;
	WREG32(LB_MAX_REQ_OUTSTANDING, tmp);

	if (mode0 && mode1) {
		if (dfixed_trunc(wm0.dbpp) > 64)
			a.full = dfixed_div(wm0.dbpp, wm0.num_line_pair);
		else
			a.full = wm0.num_line_pair.full;
		if (dfixed_trunc(wm1.dbpp) > 64)
			b.full = dfixed_div(wm1.dbpp, wm1.num_line_pair);
		else
			b.full = wm1.num_line_pair.full;
		a.full += b.full;
		fill_rate.full = dfixed_div(wm0.sclk, a);
		if (wm0.consumption_rate.full > fill_rate.full) {
			b.full = wm0.consumption_rate.full - fill_rate.full;
			b.full = dfixed_mul(b, wm0.active_time);
			a.full = dfixed_const(16);
			b.full = dfixed_div(b, a);
			a.full = dfixed_mul(wm0.worst_case_latency,
					    wm0.consumption_rate);
			priority_mark02.full = a.full + b.full;
		} else {
			a.full = dfixed_mul(wm0.worst_case_latency,
					    wm0.consumption_rate);
			b.full = dfixed_const(16 * 1000);
			priority_mark02.full = dfixed_div(a, b);
		}
		if (wm1.consumption_rate.full > fill_rate.full) {
			b.full = wm1.consumption_rate.full - fill_rate.full;
			b.full = dfixed_mul(b, wm1.active_time);
			a.full = dfixed_const(16);
			b.full = dfixed_div(b, a);
			a.full = dfixed_mul(wm1.worst_case_latency,
					    wm1.consumption_rate);
			priority_mark12.full = a.full + b.full;
		} else {
			a.full = dfixed_mul(wm1.worst_case_latency,
					    wm1.consumption_rate);
			b.full = dfixed_const(16 * 1000);
			priority_mark12.full = dfixed_div(a, b);
		}
		if (wm0.priority_mark.full > priority_mark02.full)
			priority_mark02.full = wm0.priority_mark.full;
		if (dfixed_trunc(priority_mark02) < 0)
			priority_mark02.full = 0;
		if (wm0.priority_mark_max.full > priority_mark02.full)
			priority_mark02.full = wm0.priority_mark_max.full;
		if (wm1.priority_mark.full > priority_mark12.full)
			priority_mark12.full = wm1.priority_mark.full;
		if (dfixed_trunc(priority_mark12) < 0)
			priority_mark12.full = 0;
		if (wm1.priority_mark_max.full > priority_mark12.full)
			priority_mark12.full = wm1.priority_mark_max.full;
		d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
		d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
		if (rdev->disp_priority == 2) {
			d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
			d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
		}
	} else if (mode0) {
		if (dfixed_trunc(wm0.dbpp) > 64)
			a.full = dfixed_div(wm0.dbpp, wm0.num_line_pair);
		else
			a.full = wm0.num_line_pair.full;
		fill_rate.full = dfixed_div(wm0.sclk, a);
		if (wm0.consumption_rate.full > fill_rate.full) {
			b.full = wm0.consumption_rate.full - fill_rate.full;
			b.full = dfixed_mul(b, wm0.active_time);
			a.full = dfixed_const(16);
			b.full = dfixed_div(b, a);
			a.full = dfixed_mul(wm0.worst_case_latency,
					    wm0.consumption_rate);
			priority_mark02.full = a.full + b.full;
		} else {
			a.full = dfixed_mul(wm0.worst_case_latency,
					    wm0.consumption_rate);
			b.full = dfixed_const(16);
			priority_mark02.full = dfixed_div(a, b);
		}
		if (wm0.priority_mark.full > priority_mark02.full)
			priority_mark02.full = wm0.priority_mark.full;
		if (dfixed_trunc(priority_mark02) < 0)
			priority_mark02.full = 0;
		if (wm0.priority_mark_max.full > priority_mark02.full)
			priority_mark02.full = wm0.priority_mark_max.full;
		d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
		if (rdev->disp_priority == 2)
			d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
	} else if (mode1) {
		if (dfixed_trunc(wm1.dbpp) > 64)
			a.full = dfixed_div(wm1.dbpp, wm1.num_line_pair);
		else
			a.full = wm1.num_line_pair.full;
		fill_rate.full = dfixed_div(wm1.sclk, a);
		if (wm1.consumption_rate.full > fill_rate.full) {
			b.full = wm1.consumption_rate.full - fill_rate.full;
			b.full = dfixed_mul(b, wm1.active_time);
			a.full = dfixed_const(16);
			b.full = dfixed_div(b, a);
			a.full = dfixed_mul(wm1.worst_case_latency,
					    wm1.consumption_rate);
			priority_mark12.full = a.full + b.full;
		} else {
			a.full = dfixed_mul(wm1.worst_case_latency,
					    wm1.consumption_rate);
			b.full = dfixed_const(16 * 1000);
			priority_mark12.full = dfixed_div(a, b);
		}
		if (wm1.priority_mark.full > priority_mark12.full)
			priority_mark12.full = wm1.priority_mark.full;
		if (dfixed_trunc(priority_mark12) < 0)
			priority_mark12.full = 0;
		if (wm1.priority_mark_max.full > priority_mark12.full)
			priority_mark12.full = wm1.priority_mark_max.full;
		d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
		if (rdev->disp_priority == 2)
			d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
	}

	WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
	WREG32(D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
	WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
	WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
}
void rv515_bandwidth_update(struct radeon_device *rdev)
{
	uint32_t tmp;
	struct drm_display_mode *mode0 = NULL;
	struct drm_display_mode *mode1 = NULL;

	radeon_update_display_priority(rdev);

	if (rdev->mode_info.crtcs[0]->base.enabled)
		mode0 = &rdev->mode_info.crtcs[0]->base.mode;
	if (rdev->mode_info.crtcs[1]->base.enabled)
		mode1 = &rdev->mode_info.crtcs[1]->base.mode;
	/*
	 * Set display0/1 priority up in the memory controller for
	 * modes if the user specifies HIGH for displaypriority
	 * option.
	 */
	if ((rdev->disp_priority == 2) &&
	    (rdev->family == CHIP_RV515)) {
		tmp = RREG32_MC(MC_MISC_LAT_TIMER);
		tmp &= ~MC_DISP1R_INIT_LAT_MASK;
		tmp &= ~MC_DISP0R_INIT_LAT_MASK;
		if (mode1)
			tmp |= (1 << MC_DISP1R_INIT_LAT_SHIFT);
		if (mode0)
			tmp |= (1 << MC_DISP0R_INIT_LAT_SHIFT);
		WREG32_MC(MC_MISC_LAT_TIMER, tmp);
	}
	rv515_bandwidth_avivo_update(rdev);
}