/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *     Alex Deucher <alexander.deucher@amd.com>
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/drm2/drmP.h>
#include <dev/drm2/radeon/radeon_drm.h>
#include "radeon.h"
#include "radeon_asic.h"

#include "evergreend.h"
#include "evergreen_blit_shaders.h"
#include "cayman_blit_shaders.h"
#include "radeon_blit_common.h"

/* emits 17 */
static void
set_render_target(struct radeon_device *rdev, int format,
		  int w, int h, u64 gpu_addr)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	u32 cb_color_info;
	int pitch, slice;

	h = roundup2(h, 8);
	if (h < 8)
		h = 8;

	cb_color_info = CB_FORMAT(format) |
		CB_SOURCE_FORMAT(CB_SF_EXPORT_NORM) |
		CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
	pitch = (w / 8) - 1;
	slice = ((w * h) / 64) - 1;

	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 15));
	radeon_ring_write(ring, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_START) >> 2);
	radeon_ring_write(ring, gpu_addr >> 8);
	radeon_ring_write(ring, pitch);
	radeon_ring_write(ring, slice);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, cb_color_info);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, (w - 1) | ((h - 1) << 16));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0);
}

/* emits 5dw */
static void
cp_set_surface_sync(struct radeon_device *rdev,
		    u32 sync_type, u32 size,
		    u64 mc_addr)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	u32 cp_coher_size;

	if (size == 0xffffffff)
		cp_coher_size = 0xffffffff;
	else
		cp_coher_size = ((size + 255) >> 8);

	if (rdev->family >= CHIP_CAYMAN) {
		/* CP_COHER_CNTL2 has to be set manually when submitting a surface_sync
		 * to the RB directly. For IBs, the CP programs this as part of the
		 * surface_sync packet.
		 */
		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(ring, (0x85e8 - PACKET3_SET_CONFIG_REG_START) >> 2);
		radeon_ring_write(ring, 0); /* CP_COHER_CNTL2 */
	}
	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
	radeon_ring_write(ring, sync_type);
	radeon_ring_write(ring, cp_coher_size);
	radeon_ring_write(ring, mc_addr >> 8);
	radeon_ring_write(ring, 10); /* poll interval */
}

/* emits 11dw + 1 surface sync = 16dw */
static void
set_shaders(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	u64 gpu_addr;

	/* VS */
	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 3));
	radeon_ring_write(ring, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_START) >> 2);
	radeon_ring_write(ring, gpu_addr >> 8);
	radeon_ring_write(ring, 2);
	radeon_ring_write(ring, 0);

	/* PS */
	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset;
	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 4));
	radeon_ring_write(ring, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_START) >> 2);
	radeon_ring_write(ring, gpu_addr >> 8);
	radeon_ring_write(ring, 1);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 2);

	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
	cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr);
}

/* emits 10 + 1 sync (5) = 15 */
static void
set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	u32 sq_vtx_constant_word2, sq_vtx_constant_word3;

	/* high addr, stride */
	sq_vtx_constant_word2 = SQ_VTXC_BASE_ADDR_HI(upper_32_bits(gpu_addr) & 0xff) |
		SQ_VTXC_STRIDE(16);
#ifdef __BIG_ENDIAN
	sq_vtx_constant_word2 |= SQ_VTXC_ENDIAN_SWAP(SQ_ENDIAN_8IN32);
#endif
	/* xyzw swizzles */
	sq_vtx_constant_word3 = SQ_VTCX_SEL_X(SQ_SEL_X) |
		SQ_VTCX_SEL_Y(SQ_SEL_Y) |
		SQ_VTCX_SEL_Z(SQ_SEL_Z) |
		SQ_VTCX_SEL_W(SQ_SEL_W);

	radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 8));
	radeon_ring_write(ring, 0x580);
	radeon_ring_write(ring, gpu_addr & 0xffffffff);
	radeon_ring_write(ring, 48 - 1); /* size */
	radeon_ring_write(ring, sq_vtx_constant_word2);
	radeon_ring_write(ring, sq_vtx_constant_word3);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_BUFFER));

	if ((rdev->family == CHIP_CEDAR) ||
	    (rdev->family == CHIP_PALM) ||
	    (rdev->family == CHIP_SUMO) ||
	    (rdev->family == CHIP_SUMO2) ||
	    (rdev->family == CHIP_CAICOS))
		cp_set_surface_sync(rdev,
				    PACKET3_TC_ACTION_ENA, 48, gpu_addr);
	else
		cp_set_surface_sync(rdev,
				    PACKET3_VC_ACTION_ENA, 48, gpu_addr);

}

/* emits 10 */
static void
set_tex_resource(struct radeon_device *rdev,
		 int format, int w, int h, int pitch,
		 u64 gpu_addr, u32 size)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	u32 sq_tex_resource_word0, sq_tex_resource_word1;
	u32 sq_tex_resource_word4, sq_tex_resource_word7;

	if (h < 1)
		h = 1;

	sq_tex_resource_word0 = TEX_DIM(SQ_TEX_DIM_2D);
	sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 6) |
				  ((w - 1) << 18));
	sq_tex_resource_word1 = ((h - 1) << 0) |
				TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
	/* xyzw swizzles */
	sq_tex_resource_word4 = TEX_DST_SEL_X(SQ_SEL_X) |
				TEX_DST_SEL_Y(SQ_SEL_Y) |
				TEX_DST_SEL_Z(SQ_SEL_Z) |
				TEX_DST_SEL_W(SQ_SEL_W);

	sq_tex_resource_word7 = format |
		S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_TEXTURE);

	cp_set_surface_sync(rdev,
			    PACKET3_TC_ACTION_ENA, size, gpu_addr);

	radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 8));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, sq_tex_resource_word0);
	radeon_ring_write(ring, sq_tex_resource_word1);
	radeon_ring_write(ring, gpu_addr >> 8);
	radeon_ring_write(ring, gpu_addr >> 8);
	radeon_ring_write(ring, sq_tex_resource_word4);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, sq_tex_resource_word7);
}

/* emits 12 */
static void
set_scissors(struct radeon_device *rdev, int x1, int y1,
	     int x2, int y2)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	/* workaround some hw bugs */
	if (x2 == 0)
		x1 = 1;
	if (y2 == 0)
		y1 = 1;
	if (rdev->family >= CHIP_CAYMAN) {
		if ((x2 == 1) && (y2 == 1))
			x2 = 2;
	}

	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
	radeon_ring_write(ring, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
	radeon_ring_write(ring, (x1 << 0) | (y1 << 16));
	radeon_ring_write(ring, (x2 << 0) | (y2 << 16));

	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
	radeon_ring_write(ring, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
	radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
	radeon_ring_write(ring, (x2 << 0) | (y2 << 16));

	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
	radeon_ring_write(ring, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
	radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
	radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
}

/* emits 10 */
static void
draw_auto(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(ring, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_START) >> 2);
	radeon_ring_write(ring, DI_PT_RECTLIST);

	radeon_ring_write(ring, PACKET3(PACKET3_INDEX_TYPE, 0));
	radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 2) |
#endif
			  DI_INDEX_SIZE_16_BIT);

	radeon_ring_write(ring, PACKET3(PACKET3_NUM_INSTANCES, 0));
	radeon_ring_write(ring, 1);

	radeon_ring_write(ring, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
	radeon_ring_write(ring, 3);
	radeon_ring_write(ring, DI_SRC_SEL_AUTO_INDEX);

}

/* emits 39 */
static void
set_default_state(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2, sq_gpr_resource_mgmt_3;
	u32 sq_thread_resource_mgmt, sq_thread_resource_mgmt_2;
	u32 sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2, sq_stack_resource_mgmt_3;
	int num_ps_gprs, num_vs_gprs, num_temp_gprs;
	int num_gs_gprs, num_es_gprs, num_hs_gprs, num_ls_gprs;
	int num_ps_threads, num_vs_threads, num_gs_threads, num_es_threads;
	int num_hs_threads, num_ls_threads;
	int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries;
	int num_hs_stack_entries, num_ls_stack_entries;
	u64 gpu_addr;
	int dwords;

	/* set clear context state */
	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
	radeon_ring_write(ring, 0);

	if (rdev->family < CHIP_CAYMAN) {
		switch (rdev->family) {
		case CHIP_CEDAR:
		default:
			num_ps_gprs = 93;
			num_vs_gprs = 46;
			num_temp_gprs = 4;
			num_gs_gprs = 31;
			num_es_gprs = 31;
			num_hs_gprs = 23;
			num_ls_gprs = 23;
			num_ps_threads = 96;
			num_vs_threads = 16;
			num_gs_threads = 16;
			num_es_threads = 16;
			num_hs_threads = 16;
			num_ls_threads = 16;
			num_ps_stack_entries = 42;
			num_vs_stack_entries = 42;
			num_gs_stack_entries = 42;
			num_es_stack_entries = 42;
			num_hs_stack_entries = 42;
			num_ls_stack_entries = 42;
			break;
		case CHIP_REDWOOD:
			num_ps_gprs = 93;
			num_vs_gprs = 46;
			num_temp_gprs = 4;
			num_gs_gprs = 31;
			num_es_gprs = 31;
			num_hs_gprs = 23;
			num_ls_gprs = 23;
			num_ps_threads = 128;
			num_vs_threads = 20;
			num_gs_threads = 20;
			num_es_threads = 20;
			num_hs_threads = 20;
			num_ls_threads = 20;
			num_ps_stack_entries = 42;
			num_vs_stack_entries = 42;
			num_gs_stack_entries = 42;
			num_es_stack_entries = 42;
			num_hs_stack_entries = 42;
			num_ls_stack_entries = 42;
			break;
		case CHIP_JUNIPER:
			num_ps_gprs = 93;
			num_vs_gprs = 46;
			num_temp_gprs = 4;
			num_gs_gprs = 31;
			num_es_gprs = 31;
			num_hs_gprs = 23;
			num_ls_gprs = 23;
			num_ps_threads = 128;
			num_vs_threads = 20;
			num_gs_threads = 20;
			num_es_threads = 20;
			num_hs_threads = 20;
			num_ls_threads = 20;
			num_ps_stack_entries = 85;
			num_vs_stack_entries = 85;
			num_gs_stack_entries = 85;
			num_es_stack_entries = 85;
			num_hs_stack_entries = 85;
			num_ls_stack_entries = 85;
			break;
		case CHIP_CYPRESS:
		case CHIP_HEMLOCK:
			num_ps_gprs = 93;
			num_vs_gprs = 46;
			num_temp_gprs = 4;
			num_gs_gprs = 31;
			num_es_gprs = 31;
			num_hs_gprs = 23;
			num_ls_gprs = 23;
			num_ps_threads = 128;
			num_vs_threads = 20;
			num_gs_threads = 20;
			num_es_threads = 20;
			num_hs_threads = 20;
			num_ls_threads = 20;
			num_ps_stack_entries = 85;
			num_vs_stack_entries = 85;
			num_gs_stack_entries = 85;
			num_es_stack_entries = 85;
			num_hs_stack_entries = 85;
			num_ls_stack_entries = 85;
			break;
		case CHIP_PALM:
			num_ps_gprs = 93;
			num_vs_gprs = 46;
			num_temp_gprs = 4;
			num_gs_gprs = 31;
			num_es_gprs = 31;
			num_hs_gprs = 23;
			num_ls_gprs = 23;
			num_ps_threads = 96;
			num_vs_threads = 16;
			num_gs_threads = 16;
			num_es_threads = 16;
			num_hs_threads = 16;
			num_ls_threads = 16;
			num_ps_stack_entries = 42;
			num_vs_stack_entries = 42;
			num_gs_stack_entries = 42;
			num_es_stack_entries = 42;
			num_hs_stack_entries = 42;
			num_ls_stack_entries = 42;
			break;
		case CHIP_SUMO:
			num_ps_gprs = 93;
			num_vs_gprs = 46;
			num_temp_gprs = 4;
			num_gs_gprs = 31;
			num_es_gprs = 31;
			num_hs_gprs = 23;
			num_ls_gprs = 23;
			num_ps_threads = 96;
			num_vs_threads = 25;
			num_gs_threads = 25;
			num_es_threads = 25;
			num_hs_threads = 25;
			num_ls_threads = 25;
			num_ps_stack_entries = 42;
			num_vs_stack_entries = 42;
			num_gs_stack_entries = 42;
			num_es_stack_entries = 42;
			num_hs_stack_entries = 42;
			num_ls_stack_entries = 42;
			break;
		case CHIP_SUMO2:
			num_ps_gprs = 93;
			num_vs_gprs = 46;
			num_temp_gprs = 4;
			num_gs_gprs = 31;
			num_es_gprs = 31;
			num_hs_gprs = 23;
			num_ls_gprs = 23;
			num_ps_threads = 96;
			num_vs_threads = 25;
			num_gs_threads = 25;
			num_es_threads = 25;
			num_hs_threads = 25;
			num_ls_threads = 25;
			num_ps_stack_entries = 85;
			num_vs_stack_entries = 85;
			num_gs_stack_entries = 85;
			num_es_stack_entries = 85;
			num_hs_stack_entries = 85;
			num_ls_stack_entries = 85;
			break;
		case CHIP_BARTS:
			num_ps_gprs = 93;
			num_vs_gprs = 46;
			num_temp_gprs = 4;
			num_gs_gprs = 31;
			num_es_gprs = 31;
			num_hs_gprs = 23;
			num_ls_gprs = 23;
			num_ps_threads = 128;
			num_vs_threads = 20;
			num_gs_threads = 20;
			num_es_threads = 20;
			num_hs_threads = 20;
			num_ls_threads = 20;
			num_ps_stack_entries = 85;
			num_vs_stack_entries = 85;
			num_gs_stack_entries = 85;
			num_es_stack_entries = 85;
			num_hs_stack_entries = 85;
			num_ls_stack_entries = 85;
			break;
		case CHIP_TURKS:
			num_ps_gprs = 93;
			num_vs_gprs = 46;
			num_temp_gprs = 4;
			num_gs_gprs = 31;
			num_es_gprs = 31;
			num_hs_gprs = 23;
			num_ls_gprs = 23;
			num_ps_threads = 128;
			num_vs_threads = 20;
			num_gs_threads = 20;
			num_es_threads = 20;
			num_hs_threads = 20;
			num_ls_threads = 20;
			num_ps_stack_entries = 42;
			num_vs_stack_entries = 42;
			num_gs_stack_entries = 42;
			num_es_stack_entries = 42;
			num_hs_stack_entries = 42;
			num_ls_stack_entries = 42;
			break;
		case CHIP_CAICOS:
			num_ps_gprs = 93;
			num_vs_gprs = 46;
			num_temp_gprs = 4;
			num_gs_gprs = 31;
			num_es_gprs = 31;
			num_hs_gprs = 23;
			num_ls_gprs = 23;
			num_ps_threads = 128;
			num_vs_threads = 10;
			num_gs_threads = 10;
			num_es_threads = 10;
			num_hs_threads = 10;
			num_ls_threads = 10;
			num_ps_stack_entries = 42;
			num_vs_stack_entries = 42;
			num_gs_stack_entries = 42;
			num_es_stack_entries = 42;
			num_hs_stack_entries = 42;
			num_ls_stack_entries = 42;
			break;
		}

		if ((rdev->family == CHIP_CEDAR) ||
		    (rdev->family == CHIP_PALM) ||
		    (rdev->family == CHIP_SUMO) ||
		    (rdev->family == CHIP_SUMO2) ||
		    (rdev->family == CHIP_CAICOS))
			sq_config = 0;
		else
			sq_config = VC_ENABLE;

		sq_config |= (EXPORT_SRC_C |
			      CS_PRIO(0) |
			      LS_PRIO(0) |
			      HS_PRIO(0) |
			      PS_PRIO(0) |
			      VS_PRIO(1) |
			      GS_PRIO(2) |
			      ES_PRIO(3));

		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(num_ps_gprs) |
					  NUM_VS_GPRS(num_vs_gprs) |
					  NUM_CLAUSE_TEMP_GPRS(num_temp_gprs));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(num_gs_gprs) |
					  NUM_ES_GPRS(num_es_gprs));
		sq_gpr_resource_mgmt_3 = (NUM_HS_GPRS(num_hs_gprs) |
					  NUM_LS_GPRS(num_ls_gprs));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(num_ps_threads) |
					   NUM_VS_THREADS(num_vs_threads) |
					   NUM_GS_THREADS(num_gs_threads) |
					   NUM_ES_THREADS(num_es_threads));
		sq_thread_resource_mgmt_2 = (NUM_HS_THREADS(num_hs_threads) |
					     NUM_LS_THREADS(num_ls_threads));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(num_ps_stack_entries) |
					    NUM_VS_STACK_ENTRIES(num_vs_stack_entries));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(num_gs_stack_entries) |
					    NUM_ES_STACK_ENTRIES(num_es_stack_entries));
		sq_stack_resource_mgmt_3 = (NUM_HS_STACK_ENTRIES(num_hs_stack_entries) |
					    NUM_LS_STACK_ENTRIES(num_ls_stack_entries));

		/* disable dyn gprs */
		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(ring, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2);
		radeon_ring_write(ring, 0);

		/* setup LDS */
		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(ring, (SQ_LDS_RESOURCE_MGMT - PACKET3_SET_CONFIG_REG_START) >> 2);
		radeon_ring_write(ring, 0x10001000);

		/* SQ config */
		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 11));
		radeon_ring_write(ring, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2);
		radeon_ring_write(ring, sq_config);
		radeon_ring_write(ring, sq_gpr_resource_mgmt_1);
		radeon_ring_write(ring, sq_gpr_resource_mgmt_2);
		radeon_ring_write(ring, sq_gpr_resource_mgmt_3);
		radeon_ring_write(ring, 0);
		radeon_ring_write(ring, 0);
		radeon_ring_write(ring, sq_thread_resource_mgmt);
		radeon_ring_write(ring, sq_thread_resource_mgmt_2);
		radeon_ring_write(ring, sq_stack_resource_mgmt_1);
		radeon_ring_write(ring, sq_stack_resource_mgmt_2);
		radeon_ring_write(ring, sq_stack_resource_mgmt_3);
	}

	/* CONTEXT_CONTROL */
	radeon_ring_write(ring, 0xc0012800);
	radeon_ring_write(ring, 0x80000000);
	radeon_ring_write(ring, 0x80000000);

	/* SQ_VTX_BASE_VTX_LOC */
	radeon_ring_write(ring, 0xc0026f00);
	radeon_ring_write(ring, 0x00000000);
	radeon_ring_write(ring, 0x00000000);
	radeon_ring_write(ring, 0x00000000);

	/* SET_SAMPLER */
	radeon_ring_write(ring, 0xc0036e00);
	radeon_ring_write(ring, 0x00000000);
	radeon_ring_write(ring, 0x00000012);
	radeon_ring_write(ring, 0x00000000);
	radeon_ring_write(ring, 0x00000000);

	/* set to DX10/11 mode */
	radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
	radeon_ring_write(ring, 1);

	/* emit an IB pointing at default state */
	dwords = roundup2(rdev->r600_blit.state_len, 0x10);
	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(ring, gpu_addr & 0xFFFFFFFC);
	radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xFF);
	radeon_ring_write(ring, dwords);

}

int evergreen_blit_init(struct radeon_device *rdev)
{
	u32 obj_size;
	int i, r, dwords;
	void *ptr;
	u32 packet2s[16];
	int num_packet2s = 0;

	rdev->r600_blit.primitives.set_render_target = set_render_target;
	rdev->r600_blit.primitives.cp_set_surface_sync = cp_set_surface_sync;
	rdev->r600_blit.primitives.set_shaders = set_shaders;
	rdev->r600_blit.primitives.set_vtx_resource = set_vtx_resource;
	rdev->r600_blit.primitives.set_tex_resource = set_tex_resource;
	rdev->r600_blit.primitives.set_scissors = set_scissors;
	rdev->r600_blit.primitives.draw_auto = draw_auto;
	rdev->r600_blit.primitives.set_default_state = set_default_state;

	rdev->r600_blit.ring_size_common = 8; /* sync semaphore */
	rdev->r600_blit.ring_size_common += 55; /* shaders + def state */
	rdev->r600_blit.ring_size_common += 16; /* fence emit for VB IB */
	rdev->r600_blit.ring_size_common += 5; /* done copy */
	rdev->r600_blit.ring_size_common += 16; /* fence emit for done copy */

	rdev->r600_blit.ring_size_per_loop = 74;
	if (rdev->family >= CHIP_CAYMAN)
		rdev->r600_blit.ring_size_per_loop += 9; /* additional DWs for surface sync */

	rdev->r600_blit.max_dim = 16384;

	rdev->r600_blit.state_offset = 0;

	if (rdev->family < CHIP_CAYMAN)
		rdev->r600_blit.state_len = evergreen_default_size;
	else
		rdev->r600_blit.state_len = cayman_default_size;

	dwords = rdev->r600_blit.state_len;
	while (dwords & 0xf) {
		packet2s[num_packet2s++] = cpu_to_le32(PACKET2(0));
		dwords++;
	}

	obj_size = dwords * 4;
	obj_size = roundup2(obj_size, 256);

	rdev->r600_blit.vs_offset = obj_size;
	if (rdev->family < CHIP_CAYMAN)
		obj_size += evergreen_vs_size * 4;
	else
		obj_size += cayman_vs_size * 4;
	obj_size = roundup2(obj_size, 256);

	rdev->r600_blit.ps_offset = obj_size;
	if (rdev->family < CHIP_CAYMAN)
		obj_size += evergreen_ps_size * 4;
	else
		obj_size += cayman_ps_size * 4;
	obj_size = roundup2(obj_size, 256);

	/* pin copy shader into vram if not already initialized */
	if (!rdev->r600_blit.shader_obj) {
		r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_VRAM,
				     NULL, &rdev->r600_blit.shader_obj);
		if (r) {
			DRM_ERROR("evergreen failed to allocate shader\n");
			return r;
		}

		r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
				  &rdev->r600_blit.shader_gpu_addr);
		radeon_bo_unreserve(rdev->r600_blit.shader_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
			return r;
		}
	}

	DRM_DEBUG("evergreen blit allocated bo %08x vs %08x ps %08x\n",
		  obj_size,
		  rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset);

	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_kmap(rdev->r600_blit.shader_obj, &ptr);
	if (r) {
		DRM_ERROR("failed to map blit object %d\n", r);
		return r;
	}

	if (rdev->family < CHIP_CAYMAN) {
		memcpy_toio((char *)ptr + rdev->r600_blit.state_offset,
			    evergreen_default_state, rdev->r600_blit.state_len * 4);

		if (num_packet2s)
			memcpy_toio((char *)ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
				    packet2s, num_packet2s * 4);
		for (i = 0; i < evergreen_vs_size; i++)
			*(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(evergreen_vs[i]);
		for (i = 0; i < evergreen_ps_size; i++)
			*(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(evergreen_ps[i]);
	} else {
		memcpy_toio((char *)ptr + rdev->r600_blit.state_offset,
			    cayman_default_state, rdev->r600_blit.state_len * 4);

		if (num_packet2s)
			memcpy_toio((char *)ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
				    packet2s, num_packet2s * 4);
		for (i = 0; i < cayman_vs_size; i++)
			*(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(cayman_vs[i]);
		for (i = 0; i < cayman_ps_size; i++)
			*(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(cayman_ps[i]);
	}
	radeon_bo_kunmap(rdev->r600_blit.shader_obj);
	radeon_bo_unreserve(rdev->r600_blit.shader_obj);

	radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
	return 0;
}
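
/*
 * Illustrative only, not part of the driver: a minimal sketch of how the
 * primitives hooked up in evergreen_blit_init() are typically driven for a
 * single rectangle copy by the shared r600 blit path.  The helper name
 * blit_copy_one_rect(), the 32bpp format macros and the surface-sync flush
 * flags used below are assumptions made for the example; the real per-loop
 * logic lives in the common r600 blit code.  Kept under #if 0 so it is
 * never built.
 */
#if 0
static void
blit_copy_one_rect(struct radeon_device *rdev,
		   u64 src_gpu_addr, u64 dst_gpu_addr,
		   int w, int h, u64 vb_gpu_addr)
{
	/* once per batch: default render state and the blit VS/PS */
	rdev->r600_blit.primitives.set_default_state(rdev);
	rdev->r600_blit.primitives.set_shaders(rdev);

	/* per rectangle: bind the vertex buffer holding the rect coordinates */
	rdev->r600_blit.primitives.set_vtx_resource(rdev, vb_gpu_addr);

	/* source surface as a texture, destination as the render target */
	rdev->r600_blit.primitives.set_tex_resource(rdev, FMT_8_8_8_8,
						    w, h, w,
						    src_gpu_addr, w * h * 4);
	rdev->r600_blit.primitives.set_render_target(rdev, COLOR_8_8_8_8,
						     w, h, dst_gpu_addr);
	rdev->r600_blit.primitives.set_scissors(rdev, 0, 0, w, h);

	/* auto-indexed RECTLIST draw, then flush the destination surface */
	rdev->r600_blit.primitives.draw_auto(rdev);
	rdev->r600_blit.primitives.cp_set_surface_sync(rdev,
						       PACKET3_CB_ACTION_ENA |
						       PACKET3_CB0_DEST_BASE_ENA,
						       w * h * 4, dst_gpu_addr);
}
#endif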