1/* 2 * Copyright 2008 Advanced Micro Devices, Inc. 3 * Copyright 2008 Red Hat Inc. 4 * Copyright 2009 Jerome Glisse. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 22 * OTHER DEALINGS IN THE SOFTWARE. 23 * 24 * Authors: Dave Airlie 25 * Alex Deucher 26 * Jerome Glisse 27 */ 28 29#include <sys/cdefs.h> 30__FBSDID("$FreeBSD$"); 31 32#include <dev/drm2/drmP.h> 33#include "radeon.h" 34#include "radeon_asic.h" 35#include "r600d.h" 36#include "r600_reg_safe.h" 37#include "r600_cp.h" 38#include "r600_cs.h" 39 40static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p, 41 struct radeon_cs_reloc **cs_reloc); 42static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p, 43 struct radeon_cs_reloc **cs_reloc); 44typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**); 45static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm; 46 47 48struct r600_cs_track { 49 /* configuration we miror so that we use same code btw kms/ums */ 50 u32 group_size; 51 u32 nbanks; 52 u32 npipes; 53 /* value we track */ 54 u32 sq_config; 55 u32 log_nsamples; 56 u32 nsamples; 57 u32 cb_color_base_last[8]; 58 struct radeon_bo *cb_color_bo[8]; 59 u64 cb_color_bo_mc[8]; 60 u64 cb_color_bo_offset[8]; 61 struct radeon_bo *cb_color_frag_bo[8]; 62 u64 cb_color_frag_offset[8]; 63 struct radeon_bo *cb_color_tile_bo[8]; 64 u64 cb_color_tile_offset[8]; 65 u32 cb_color_mask[8]; 66 u32 cb_color_info[8]; 67 u32 cb_color_view[8]; 68 u32 cb_color_size_idx[8]; /* unused */ 69 u32 cb_target_mask; 70 u32 cb_shader_mask; /* unused */ 71 bool is_resolve; 72 u32 cb_color_size[8]; 73 u32 vgt_strmout_en; 74 u32 vgt_strmout_buffer_en; 75 struct radeon_bo *vgt_strmout_bo[4]; 76 u64 vgt_strmout_bo_mc[4]; /* unused */ 77 u32 vgt_strmout_bo_offset[4]; 78 u32 vgt_strmout_size[4]; 79 u32 db_depth_control; 80 u32 db_depth_info; 81 u32 db_depth_size_idx; 82 u32 db_depth_view; 83 u32 db_depth_size; 84 u32 db_offset; 85 struct radeon_bo *db_bo; 86 u64 db_bo_mc; 87 bool sx_misc_kill_all_prims; 88 bool cb_dirty; 89 bool db_dirty; 90 bool streamout_dirty; 91 struct radeon_bo *htile_bo; 92 u64 htile_offset; 93 u32 htile_surface; 94}; 95 96#define FMT_8_BIT(fmt, vc) [fmt] = { 1, 1, 1, vc, CHIP_R600 } 97#define FMT_16_BIT(fmt, vc) [fmt] = { 1, 1, 2, vc, CHIP_R600 } 98#define FMT_24_BIT(fmt) [fmt] = { 1, 1, 4, 0, CHIP_R600 } 99#define FMT_32_BIT(fmt, vc) [fmt] = { 1, 1, 4, vc, CHIP_R600 } 100#define FMT_48_BIT(fmt) [fmt] = { 1, 1, 8, 0, CHIP_R600 } 101#define FMT_64_BIT(fmt, vc) [fmt] = { 1, 1, 
8, vc, CHIP_R600 } 102#define FMT_96_BIT(fmt) [fmt] = { 1, 1, 12, 0, CHIP_R600 } 103#define FMT_128_BIT(fmt, vc) [fmt] = { 1, 1, 16,vc, CHIP_R600 } 104 105struct gpu_formats { 106 unsigned blockwidth; 107 unsigned blockheight; 108 unsigned blocksize; 109 unsigned valid_color; 110 enum radeon_family min_family; 111}; 112 113static const struct gpu_formats color_formats_table[] = { 114 /* 8 bit */ 115 FMT_8_BIT(V_038004_COLOR_8, 1), 116 FMT_8_BIT(V_038004_COLOR_4_4, 1), 117 FMT_8_BIT(V_038004_COLOR_3_3_2, 1), 118 FMT_8_BIT(V_038004_FMT_1, 0), 119 120 /* 16-bit */ 121 FMT_16_BIT(V_038004_COLOR_16, 1), 122 FMT_16_BIT(V_038004_COLOR_16_FLOAT, 1), 123 FMT_16_BIT(V_038004_COLOR_8_8, 1), 124 FMT_16_BIT(V_038004_COLOR_5_6_5, 1), 125 FMT_16_BIT(V_038004_COLOR_6_5_5, 1), 126 FMT_16_BIT(V_038004_COLOR_1_5_5_5, 1), 127 FMT_16_BIT(V_038004_COLOR_4_4_4_4, 1), 128 FMT_16_BIT(V_038004_COLOR_5_5_5_1, 1), 129 130 /* 24-bit */ 131 FMT_24_BIT(V_038004_FMT_8_8_8), 132 133 /* 32-bit */ 134 FMT_32_BIT(V_038004_COLOR_32, 1), 135 FMT_32_BIT(V_038004_COLOR_32_FLOAT, 1), 136 FMT_32_BIT(V_038004_COLOR_16_16, 1), 137 FMT_32_BIT(V_038004_COLOR_16_16_FLOAT, 1), 138 FMT_32_BIT(V_038004_COLOR_8_24, 1), 139 FMT_32_BIT(V_038004_COLOR_8_24_FLOAT, 1), 140 FMT_32_BIT(V_038004_COLOR_24_8, 1), 141 FMT_32_BIT(V_038004_COLOR_24_8_FLOAT, 1), 142 FMT_32_BIT(V_038004_COLOR_10_11_11, 1), 143 FMT_32_BIT(V_038004_COLOR_10_11_11_FLOAT, 1), 144 FMT_32_BIT(V_038004_COLOR_11_11_10, 1), 145 FMT_32_BIT(V_038004_COLOR_11_11_10_FLOAT, 1), 146 FMT_32_BIT(V_038004_COLOR_2_10_10_10, 1), 147 FMT_32_BIT(V_038004_COLOR_8_8_8_8, 1), 148 FMT_32_BIT(V_038004_COLOR_10_10_10_2, 1), 149 FMT_32_BIT(V_038004_FMT_5_9_9_9_SHAREDEXP, 0), 150 FMT_32_BIT(V_038004_FMT_32_AS_8, 0), 151 FMT_32_BIT(V_038004_FMT_32_AS_8_8, 0), 152 153 /* 48-bit */ 154 FMT_48_BIT(V_038004_FMT_16_16_16), 155 FMT_48_BIT(V_038004_FMT_16_16_16_FLOAT), 156 157 /* 64-bit */ 158 FMT_64_BIT(V_038004_COLOR_X24_8_32_FLOAT, 1), 159 FMT_64_BIT(V_038004_COLOR_32_32, 1), 160 FMT_64_BIT(V_038004_COLOR_32_32_FLOAT, 1), 161 FMT_64_BIT(V_038004_COLOR_16_16_16_16, 1), 162 FMT_64_BIT(V_038004_COLOR_16_16_16_16_FLOAT, 1), 163 164 FMT_96_BIT(V_038004_FMT_32_32_32), 165 FMT_96_BIT(V_038004_FMT_32_32_32_FLOAT), 166 167 /* 128-bit */ 168 FMT_128_BIT(V_038004_COLOR_32_32_32_32, 1), 169 FMT_128_BIT(V_038004_COLOR_32_32_32_32_FLOAT, 1), 170 171 [V_038004_FMT_GB_GR] = { 2, 1, 4, 0 }, 172 [V_038004_FMT_BG_RG] = { 2, 1, 4, 0 }, 173 174 /* block compressed formats */ 175 [V_038004_FMT_BC1] = { 4, 4, 8, 0 }, 176 [V_038004_FMT_BC2] = { 4, 4, 16, 0 }, 177 [V_038004_FMT_BC3] = { 4, 4, 16, 0 }, 178 [V_038004_FMT_BC4] = { 4, 4, 8, 0 }, 179 [V_038004_FMT_BC5] = { 4, 4, 16, 0}, 180 [V_038004_FMT_BC6] = { 4, 4, 16, 0, CHIP_CEDAR}, /* Evergreen-only */ 181 [V_038004_FMT_BC7] = { 4, 4, 16, 0, CHIP_CEDAR}, /* Evergreen-only */ 182 183 /* The other Evergreen formats */ 184 [V_038004_FMT_32_AS_32_32_32_32] = { 1, 1, 4, 0, CHIP_CEDAR}, 185}; 186 187bool r600_fmt_is_valid_color(u32 format) 188{ 189 if (format >= DRM_ARRAY_SIZE(color_formats_table)) 190 return false; 191 192 if (color_formats_table[format].valid_color) 193 return true; 194 195 return false; 196} 197 198bool r600_fmt_is_valid_texture(u32 format, enum radeon_family family) 199{ 200 if (format >= DRM_ARRAY_SIZE(color_formats_table)) 201 return false; 202 203 if (family < color_formats_table[format].min_family) 204 return false; 205 206 if (color_formats_table[format].blockwidth > 0) 207 return true; 208 209 return false; 210} 211 212int r600_fmt_get_blocksize(u32 format) 
213{ 214 if (format >= DRM_ARRAY_SIZE(color_formats_table)) 215 return 0; 216 217 return color_formats_table[format].blocksize; 218} 219 220int r600_fmt_get_nblocksx(u32 format, u32 w) 221{ 222 unsigned bw; 223 224 if (format >= DRM_ARRAY_SIZE(color_formats_table)) 225 return 0; 226 227 bw = color_formats_table[format].blockwidth; 228 if (bw == 0) 229 return 0; 230 231 return (w + bw - 1) / bw; 232} 233 234int r600_fmt_get_nblocksy(u32 format, u32 h) 235{ 236 unsigned bh; 237 238 if (format >= DRM_ARRAY_SIZE(color_formats_table)) 239 return 0; 240 241 bh = color_formats_table[format].blockheight; 242 if (bh == 0) 243 return 0; 244 245 return (h + bh - 1) / bh; 246} 247 248struct array_mode_checker { 249 int array_mode; 250 u32 group_size; 251 u32 nbanks; 252 u32 npipes; 253 u32 nsamples; 254 u32 blocksize; 255}; 256 257/* returns alignment in pixels for pitch/height/depth and bytes for base */ 258static int r600_get_array_mode_alignment(struct array_mode_checker *values, 259 u32 *pitch_align, 260 u32 *height_align, 261 u32 *depth_align, 262 u64 *base_align) 263{ 264 u32 tile_width = 8; 265 u32 tile_height = 8; 266 u32 macro_tile_width = values->nbanks; 267 u32 macro_tile_height = values->npipes; 268 u32 tile_bytes = tile_width * tile_height * values->blocksize * values->nsamples; 269 u32 macro_tile_bytes = macro_tile_width * macro_tile_height * tile_bytes; 270 271 switch (values->array_mode) { 272 case ARRAY_LINEAR_GENERAL: 273 /* technically tile_width/_height for pitch/height */ 274 *pitch_align = 1; /* tile_width */ 275 *height_align = 1; /* tile_height */ 276 *depth_align = 1; 277 *base_align = 1; 278 break; 279 case ARRAY_LINEAR_ALIGNED: 280 *pitch_align = max((u32)64, (u32)(values->group_size / values->blocksize)); 281 *height_align = 1; 282 *depth_align = 1; 283 *base_align = values->group_size; 284 break; 285 case ARRAY_1D_TILED_THIN1: 286 *pitch_align = max((u32)tile_width, 287 (u32)(values->group_size / 288 (tile_height * values->blocksize * values->nsamples))); 289 *height_align = tile_height; 290 *depth_align = 1; 291 *base_align = values->group_size; 292 break; 293 case ARRAY_2D_TILED_THIN1: 294 *pitch_align = max((u32)macro_tile_width * tile_width, 295 (u32)((values->group_size * values->nbanks) / 296 (values->blocksize * values->nsamples * tile_width))); 297 *height_align = macro_tile_height * tile_height; 298 *depth_align = 1; 299 *base_align = max(macro_tile_bytes, 300 (*pitch_align) * values->blocksize * (*height_align) * values->nsamples); 301 break; 302 default: 303 return -EINVAL; 304 } 305 306 return 0; 307} 308 309static void r600_cs_track_init(struct r600_cs_track *track) 310{ 311 int i; 312 313 /* assume DX9 mode */ 314 track->sq_config = DX9_CONSTS; 315 for (i = 0; i < 8; i++) { 316 track->cb_color_base_last[i] = 0; 317 track->cb_color_size[i] = 0; 318 track->cb_color_size_idx[i] = 0; 319 track->cb_color_info[i] = 0; 320 track->cb_color_view[i] = 0xFFFFFFFF; 321 track->cb_color_bo[i] = NULL; 322 track->cb_color_bo_offset[i] = 0xFFFFFFFF; 323 track->cb_color_bo_mc[i] = 0xFFFFFFFF; 324 track->cb_color_frag_bo[i] = NULL; 325 track->cb_color_frag_offset[i] = 0xFFFFFFFF; 326 track->cb_color_tile_bo[i] = NULL; 327 track->cb_color_tile_offset[i] = 0xFFFFFFFF; 328 track->cb_color_mask[i] = 0xFFFFFFFF; 329 } 330 track->is_resolve = false; 331 track->nsamples = 16; 332 track->log_nsamples = 4; 333 track->cb_target_mask = 0xFFFFFFFF; 334 track->cb_shader_mask = 0xFFFFFFFF; 335 track->cb_dirty = true; 336 track->db_bo = NULL; 337 track->db_bo_mc = 0xFFFFFFFF; 338 /* assume 
the biggest format and that htile is enabled */ 339 track->db_depth_info = 7 | (1 << 25); 340 track->db_depth_view = 0xFFFFC000; 341 track->db_depth_size = 0xFFFFFFFF; 342 track->db_depth_size_idx = 0; 343 track->db_depth_control = 0xFFFFFFFF; 344 track->db_dirty = true; 345 track->htile_bo = NULL; 346 track->htile_offset = 0xFFFFFFFF; 347 track->htile_surface = 0; 348 349 for (i = 0; i < 4; i++) { 350 track->vgt_strmout_size[i] = 0; 351 track->vgt_strmout_bo[i] = NULL; 352 track->vgt_strmout_bo_offset[i] = 0xFFFFFFFF; 353 track->vgt_strmout_bo_mc[i] = 0xFFFFFFFF; 354 } 355 track->streamout_dirty = true; 356 track->sx_misc_kill_all_prims = false; 357} 358 359static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) 360{ 361 struct r600_cs_track *track = p->track; 362 u32 slice_tile_max, size, tmp; 363 u32 height, height_align, pitch, pitch_align, depth_align; 364 u64 base_offset, base_align; 365 struct array_mode_checker array_check; 366 volatile u32 *ib = p->ib.ptr; 367 unsigned array_mode; 368 u32 format; 369 /* When resolve is used, the second colorbuffer has always 1 sample. */ 370 unsigned nsamples = track->is_resolve && i == 1 ? 1 : track->nsamples; 371 372 size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i]; 373 format = G_0280A0_FORMAT(track->cb_color_info[i]); 374 if (!r600_fmt_is_valid_color(format)) { 375 dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n", 376 __func__, __LINE__, format, 377 i, track->cb_color_info[i]); 378 return -EINVAL; 379 } 380 /* pitch in pixels */ 381 pitch = (G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1) * 8; 382 slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1; 383 slice_tile_max *= 64; 384 height = slice_tile_max / pitch; 385 if (height > 8192) 386 height = 8192; 387 array_mode = G_0280A0_ARRAY_MODE(track->cb_color_info[i]); 388 389 base_offset = track->cb_color_bo_mc[i] + track->cb_color_bo_offset[i]; 390 array_check.array_mode = array_mode; 391 array_check.group_size = track->group_size; 392 array_check.nbanks = track->nbanks; 393 array_check.npipes = track->npipes; 394 array_check.nsamples = nsamples; 395 array_check.blocksize = r600_fmt_get_blocksize(format); 396 if (r600_get_array_mode_alignment(&array_check, 397 &pitch_align, &height_align, &depth_align, &base_align)) { 398 dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__, 399 G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i, 400 track->cb_color_info[i]); 401 return -EINVAL; 402 } 403 switch (array_mode) { 404 case V_0280A0_ARRAY_LINEAR_GENERAL: 405 break; 406 case V_0280A0_ARRAY_LINEAR_ALIGNED: 407 break; 408 case V_0280A0_ARRAY_1D_TILED_THIN1: 409 /* avoid breaking userspace */ 410 if (height > 7) 411 height &= ~0x7; 412 break; 413 case V_0280A0_ARRAY_2D_TILED_THIN1: 414 break; 415 default: 416 dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__, 417 G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i, 418 track->cb_color_info[i]); 419 return -EINVAL; 420 } 421 422 if (!IS_ALIGNED(pitch, pitch_align)) { 423 dev_warn(p->dev, "%s:%d cb pitch (%d, 0x%x, %d) invalid\n", 424 __func__, __LINE__, pitch, pitch_align, array_mode); 425 return -EINVAL; 426 } 427 if (!IS_ALIGNED(height, height_align)) { 428 dev_warn(p->dev, "%s:%d cb height (%d, 0x%x, %d) invalid\n", 429 __func__, __LINE__, height, height_align, array_mode); 430 return -EINVAL; 431 } 432 if (!IS_ALIGNED(base_offset, base_align)) { 433 dev_warn(p->dev, "%s offset[%d] 0x%jx 0x%jx, %d not aligned\n", __func__, i, 434 
(uintmax_t)base_offset, (uintmax_t)base_align, array_mode); 435 return -EINVAL; 436 } 437 438 /* check offset */ 439 tmp = r600_fmt_get_nblocksy(format, height) * r600_fmt_get_nblocksx(format, pitch) * 440 r600_fmt_get_blocksize(format) * nsamples; 441 switch (array_mode) { 442 default: 443 case V_0280A0_ARRAY_LINEAR_GENERAL: 444 case V_0280A0_ARRAY_LINEAR_ALIGNED: 445 tmp += track->cb_color_view[i] & 0xFF; 446 break; 447 case V_0280A0_ARRAY_1D_TILED_THIN1: 448 case V_0280A0_ARRAY_2D_TILED_THIN1: 449 tmp += G_028080_SLICE_MAX(track->cb_color_view[i]) * tmp; 450 break; 451 } 452 if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) { 453 if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) { 454 /* the initial DDX does bad things with the CB size occasionally */ 455 /* it rounds up height too far for slice tile max but the BO is smaller */ 456 /* r600c,g also seem to flush at bad times in some apps resulting in 457 * bogus values here. So for linear just allow anything to avoid breaking 458 * broken userspace. 459 */ 460 } else { 461 dev_warn(p->dev, "%s offset[%d] %d %ju %d %lu too big (%d %d) (%d %d %d)\n", 462 __func__, i, array_mode, 463 (uintmax_t)track->cb_color_bo_offset[i], tmp, 464 radeon_bo_size(track->cb_color_bo[i]), 465 pitch, height, r600_fmt_get_nblocksx(format, pitch), 466 r600_fmt_get_nblocksy(format, height), 467 r600_fmt_get_blocksize(format)); 468 return -EINVAL; 469 } 470 } 471 /* limit max tile */ 472 tmp = (height * pitch) >> 6; 473 if (tmp < slice_tile_max) 474 slice_tile_max = tmp; 475 tmp = S_028060_PITCH_TILE_MAX((pitch / 8) - 1) | 476 S_028060_SLICE_TILE_MAX(slice_tile_max - 1); 477 ib[track->cb_color_size_idx[i]] = tmp; 478 479 /* FMASK/CMASK */ 480 switch (G_0280A0_TILE_MODE(track->cb_color_info[i])) { 481 case V_0280A0_TILE_DISABLE: 482 break; 483 case V_0280A0_FRAG_ENABLE: 484 if (track->nsamples > 1) { 485 uint32_t tile_max = G_028100_FMASK_TILE_MAX(track->cb_color_mask[i]); 486 /* the tile size is 8x8, but the size is in units of bits. 487 * for bytes, do just * 8. */ 488 uint32_t bytes = track->nsamples * track->log_nsamples * 8 * (tile_max + 1); 489 490 if (bytes + track->cb_color_frag_offset[i] > 491 radeon_bo_size(track->cb_color_frag_bo[i])) { 492 dev_warn(p->dev, "%s FMASK_TILE_MAX too large " 493 "(tile_max=%u, bytes=%u, offset=%ju, bo_size=%lu)\n", 494 __func__, tile_max, bytes, 495 (uintmax_t)track->cb_color_frag_offset[i], 496 radeon_bo_size(track->cb_color_frag_bo[i])); 497 return -EINVAL; 498 } 499 } 500 /* fall through */ 501 case V_0280A0_CLEAR_ENABLE: 502 { 503 uint32_t block_max = G_028100_CMASK_BLOCK_MAX(track->cb_color_mask[i]); 504 /* One block = 128x128 pixels, one 8x8 tile has 4 bits.. 505 * (128*128) / (8*8) / 2 = 128 bytes per block. 
*/ 506 uint32_t bytes = (block_max + 1) * 128; 507 508 if (bytes + track->cb_color_tile_offset[i] > 509 radeon_bo_size(track->cb_color_tile_bo[i])) { 510 dev_warn(p->dev, "%s CMASK_BLOCK_MAX too large " 511 "(block_max=%u, bytes=%u, offset=%ju, bo_size=%lu)\n", 512 __func__, block_max, bytes, 513 (uintmax_t)track->cb_color_tile_offset[i], 514 radeon_bo_size(track->cb_color_tile_bo[i])); 515 return -EINVAL; 516 } 517 break; 518 } 519 default: 520 dev_warn(p->dev, "%s invalid tile mode\n", __func__); 521 return -EINVAL; 522 } 523 return 0; 524} 525 526static int r600_cs_track_validate_db(struct radeon_cs_parser *p) 527{ 528 struct r600_cs_track *track = p->track; 529 u32 nviews, bpe, ntiles, size, slice_tile_max, tmp; 530 u32 height_align, pitch_align, depth_align; 531 u32 pitch = 8192; 532 u32 height = 8192; 533 u64 base_offset, base_align; 534 struct array_mode_checker array_check; 535 int array_mode; 536 volatile u32 *ib = p->ib.ptr; 537 538 539 if (track->db_bo == NULL) { 540 dev_warn(p->dev, "z/stencil with no depth buffer\n"); 541 return -EINVAL; 542 } 543 switch (G_028010_FORMAT(track->db_depth_info)) { 544 case V_028010_DEPTH_16: 545 bpe = 2; 546 break; 547 case V_028010_DEPTH_X8_24: 548 case V_028010_DEPTH_8_24: 549 case V_028010_DEPTH_X8_24_FLOAT: 550 case V_028010_DEPTH_8_24_FLOAT: 551 case V_028010_DEPTH_32_FLOAT: 552 bpe = 4; 553 break; 554 case V_028010_DEPTH_X24_8_32_FLOAT: 555 bpe = 8; 556 break; 557 default: 558 dev_warn(p->dev, "z/stencil with invalid format %d\n", G_028010_FORMAT(track->db_depth_info)); 559 return -EINVAL; 560 } 561 if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) { 562 if (!track->db_depth_size_idx) { 563 dev_warn(p->dev, "z/stencil buffer size not set\n"); 564 return -EINVAL; 565 } 566 tmp = radeon_bo_size(track->db_bo) - track->db_offset; 567 tmp = (tmp / bpe) >> 6; 568 if (!tmp) { 569 dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n", 570 track->db_depth_size, bpe, track->db_offset, 571 radeon_bo_size(track->db_bo)); 572 return -EINVAL; 573 } 574 ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF); 575 } else { 576 size = radeon_bo_size(track->db_bo); 577 /* pitch in pixels */ 578 pitch = (G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1) * 8; 579 slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1; 580 slice_tile_max *= 64; 581 height = slice_tile_max / pitch; 582 if (height > 8192) 583 height = 8192; 584 base_offset = track->db_bo_mc + track->db_offset; 585 array_mode = G_028010_ARRAY_MODE(track->db_depth_info); 586 array_check.array_mode = array_mode; 587 array_check.group_size = track->group_size; 588 array_check.nbanks = track->nbanks; 589 array_check.npipes = track->npipes; 590 array_check.nsamples = track->nsamples; 591 array_check.blocksize = bpe; 592 if (r600_get_array_mode_alignment(&array_check, 593 &pitch_align, &height_align, &depth_align, &base_align)) { 594 dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__, 595 G_028010_ARRAY_MODE(track->db_depth_info), 596 track->db_depth_info); 597 return -EINVAL; 598 } 599 switch (array_mode) { 600 case V_028010_ARRAY_1D_TILED_THIN1: 601 /* don't break userspace */ 602 height &= ~0x7; 603 break; 604 case V_028010_ARRAY_2D_TILED_THIN1: 605 break; 606 default: 607 dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__, 608 G_028010_ARRAY_MODE(track->db_depth_info), 609 track->db_depth_info); 610 return -EINVAL; 611 } 612 613 if (!IS_ALIGNED(pitch, pitch_align)) { 614 dev_warn(p->dev, "%s:%d db pitch (%d, 
0x%x, %d) invalid\n", 615 __func__, __LINE__, pitch, pitch_align, array_mode); 616 return -EINVAL; 617 } 618 if (!IS_ALIGNED(height, height_align)) { 619 dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n", 620 __func__, __LINE__, height, height_align, array_mode); 621 return -EINVAL; 622 } 623 if (!IS_ALIGNED(base_offset, base_align)) { 624 dev_warn(p->dev, "%s offset 0x%jx, 0x%jx, %d not aligned\n", __func__, 625 (uintmax_t)base_offset, (uintmax_t)base_align, array_mode); 626 return -EINVAL; 627 } 628 629 ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1; 630 nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1; 631 tmp = ntiles * bpe * 64 * nviews * track->nsamples; 632 if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) { 633 dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n", 634 array_mode, 635 track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset, 636 radeon_bo_size(track->db_bo)); 637 return -EINVAL; 638 } 639 } 640 641 /* hyperz */ 642 if (G_028010_TILE_SURFACE_ENABLE(track->db_depth_info)) { 643 unsigned long size; 644 unsigned nbx, nby; 645 646 if (track->htile_bo == NULL) { 647 dev_warn(p->dev, "%s:%d htile enabled without htile surface 0x%08x\n", 648 __func__, __LINE__, track->db_depth_info); 649 return -EINVAL; 650 } 651 if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) { 652 dev_warn(p->dev, "%s:%d htile can't be enabled with bogus db_depth_size 0x%08x\n", 653 __func__, __LINE__, track->db_depth_size); 654 return -EINVAL; 655 } 656 657 nbx = pitch; 658 nby = height; 659 if (G_028D24_LINEAR(track->htile_surface)) { 660 /* nbx must be 16 htiles aligned == 16 * 8 pixel aligned */ 661 nbx = roundup2(nbx, 16 * 8); 662 /* nby is npipes htiles aligned == npipes * 8 pixel aligned */ 663 nby = roundup(nby, track->npipes * 8); 664 } else { 665 /* always assume 8x8 htile */ 666 /* align is htile align * 8, htile align vary according to 667 * number of pipe and tile width and nby 668 */ 669 switch (track->npipes) { 670 case 8: 671 /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/ 672 nbx = roundup2(nbx, 64 * 8); 673 nby = roundup2(nby, 64 * 8); 674 break; 675 case 4: 676 /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/ 677 nbx = roundup2(nbx, 64 * 8); 678 nby = roundup2(nby, 32 * 8); 679 break; 680 case 2: 681 /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/ 682 nbx = roundup2(nbx, 32 * 8); 683 nby = roundup2(nby, 32 * 8); 684 break; 685 case 1: 686 /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/ 687 nbx = roundup2(nbx, 32 * 8); 688 nby = roundup2(nby, 16 * 8); 689 break; 690 default: 691 dev_warn(p->dev, "%s:%d invalid num pipes %d\n", 692 __func__, __LINE__, track->npipes); 693 return -EINVAL; 694 } 695 } 696 /* compute number of htile */ 697 nbx = nbx >> 3; 698 nby = nby >> 3; 699 /* size must be aligned on npipes * 2K boundary */ 700 size = roundup(nbx * nby * 4, track->npipes * (2 << 10)); 701 size += track->htile_offset; 702 703 if (size > radeon_bo_size(track->htile_bo)) { 704 dev_warn(p->dev, "%s:%d htile surface too small %ld for %ld (%d %d)\n", 705 __func__, __LINE__, radeon_bo_size(track->htile_bo), 706 size, nbx, nby); 707 return -EINVAL; 708 } 709 } 710 711 track->db_dirty = false; 712 return 0; 713} 714 715static int r600_cs_track_check(struct radeon_cs_parser *p) 716{ 717 struct r600_cs_track *track = p->track; 718 u32 tmp; 719 int r, i; 720 721 /* on legacy kernel we don't perform advanced check */ 722 if (p->rdev == NULL) 723 return 0; 724 725 /* check streamout */ 726 if (track->streamout_dirty && track->vgt_strmout_en) { 727 
for (i = 0; i < 4; i++) { 728 if (track->vgt_strmout_buffer_en & (1 << i)) { 729 if (track->vgt_strmout_bo[i]) { 730 u64 offset = (u64)track->vgt_strmout_bo_offset[i] + 731 (u64)track->vgt_strmout_size[i]; 732 if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) { 733 DRM_ERROR("streamout %d bo too small: 0x%jx, 0x%lx\n", 734 i, (uintmax_t)offset, 735 radeon_bo_size(track->vgt_strmout_bo[i])); 736 return -EINVAL; 737 } 738 } else { 739 dev_warn(p->dev, "No buffer for streamout %d\n", i); 740 return -EINVAL; 741 } 742 } 743 } 744 track->streamout_dirty = false; 745 } 746 747 if (track->sx_misc_kill_all_prims) 748 return 0; 749 750 /* check that we have a cb for each enabled target, we don't check 751 * shader_mask because it seems mesa isn't always setting it :( 752 */ 753 if (track->cb_dirty) { 754 tmp = track->cb_target_mask; 755 756 /* We must check both colorbuffers for RESOLVE. */ 757 if (track->is_resolve) { 758 tmp |= 0xff; 759 } 760 761 for (i = 0; i < 8; i++) { 762 if ((tmp >> (i * 4)) & 0xF) { 763 /* at least one component is enabled */ 764 if (track->cb_color_bo[i] == NULL) { 765 dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n", 766 __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i); 767 return -EINVAL; 768 } 769 /* perform rewrite of CB_COLOR[0-7]_SIZE */ 770 r = r600_cs_track_validate_cb(p, i); 771 if (r) 772 return r; 773 } 774 } 775 track->cb_dirty = false; 776 } 777 778 /* Check depth buffer */ 779 if (track->db_dirty && 780 G_028010_FORMAT(track->db_depth_info) != V_028010_DEPTH_INVALID && 781 (G_028800_STENCIL_ENABLE(track->db_depth_control) || 782 G_028800_Z_ENABLE(track->db_depth_control))) { 783 r = r600_cs_track_validate_db(p); 784 if (r) 785 return r; 786 } 787 788 return 0; 789} 790 791/** 792 * r600_cs_packet_parse() - parse cp packet and point ib index to next packet 793 * @parser: parser structure holding parsing context. 794 * @pkt: where to store packet informations 795 * 796 * Assume that chunk_ib_index is properly set. Will return -EINVAL 797 * if packet is bigger than remaining ib size. or if packets is unknown. 798 **/ 799static int r600_cs_packet_parse(struct radeon_cs_parser *p, 800 struct radeon_cs_packet *pkt, 801 unsigned idx) 802{ 803 struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx]; 804 uint32_t header; 805 806 if (idx >= ib_chunk->length_dw) { 807 DRM_ERROR("Can not parse packet at %d after CS end %d !\n", 808 idx, ib_chunk->length_dw); 809 return -EINVAL; 810 } 811 header = radeon_get_ib_value(p, idx); 812 pkt->idx = idx; 813 pkt->type = CP_PACKET_GET_TYPE(header); 814 pkt->count = CP_PACKET_GET_COUNT(header); 815 pkt->one_reg_wr = 0; 816 switch (pkt->type) { 817 case PACKET_TYPE0: 818 pkt->reg = CP_PACKET0_GET_REG(header); 819 break; 820 case PACKET_TYPE3: 821 pkt->opcode = CP_PACKET3_GET_OPCODE(header); 822 break; 823 case PACKET_TYPE2: 824 pkt->count = -1; 825 break; 826 default: 827 DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx); 828 return -EINVAL; 829 } 830 if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) { 831 DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n", 832 pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw); 833 return -EINVAL; 834 } 835 return 0; 836} 837 838/** 839 * r600_cs_packet_next_reloc_mm() - parse next packet which should be reloc packet3 840 * @parser: parser structure holding parsing context. 
841 * @data: pointer to relocation data 842 * @offset_start: starting offset 843 * @offset_mask: offset mask (to align start offset on) 844 * @reloc: reloc informations 845 * 846 * Check next packet is relocation packet3, do bo validation and compute 847 * GPU offset using the provided start. 848 **/ 849static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p, 850 struct radeon_cs_reloc **cs_reloc) 851{ 852 struct radeon_cs_chunk *relocs_chunk; 853 struct radeon_cs_packet p3reloc; 854 unsigned idx; 855 int r; 856 857 if (p->chunk_relocs_idx == -1) { 858 DRM_ERROR("No relocation chunk !\n"); 859 return -EINVAL; 860 } 861 *cs_reloc = NULL; 862 relocs_chunk = &p->chunks[p->chunk_relocs_idx]; 863 r = r600_cs_packet_parse(p, &p3reloc, p->idx); 864 if (r) { 865 return r; 866 } 867 p->idx += p3reloc.count + 2; 868 if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) { 869 DRM_ERROR("No packet3 for relocation for packet at %d.\n", 870 p3reloc.idx); 871 return -EINVAL; 872 } 873 idx = radeon_get_ib_value(p, p3reloc.idx + 1); 874 if (idx >= relocs_chunk->length_dw) { 875 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", 876 idx, relocs_chunk->length_dw); 877 return -EINVAL; 878 } 879 /* FIXME: we assume reloc size is 4 dwords */ 880 *cs_reloc = p->relocs_ptr[(idx / 4)]; 881 return 0; 882} 883 884/** 885 * r600_cs_packet_next_reloc_nomm() - parse next packet which should be reloc packet3 886 * @parser: parser structure holding parsing context. 887 * @data: pointer to relocation data 888 * @offset_start: starting offset 889 * @offset_mask: offset mask (to align start offset on) 890 * @reloc: reloc informations 891 * 892 * Check next packet is relocation packet3, do bo validation and compute 893 * GPU offset using the provided start. 894 **/ 895static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p, 896 struct radeon_cs_reloc **cs_reloc) 897{ 898 struct radeon_cs_chunk *relocs_chunk; 899 struct radeon_cs_packet p3reloc; 900 unsigned idx; 901 int r; 902 903 if (p->chunk_relocs_idx == -1) { 904 DRM_ERROR("No relocation chunk !\n"); 905 return -EINVAL; 906 } 907 *cs_reloc = NULL; 908 relocs_chunk = &p->chunks[p->chunk_relocs_idx]; 909 r = r600_cs_packet_parse(p, &p3reloc, p->idx); 910 if (r) { 911 return r; 912 } 913 p->idx += p3reloc.count + 2; 914 if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) { 915 DRM_ERROR("No packet3 for relocation for packet at %d.\n", 916 p3reloc.idx); 917 return -EINVAL; 918 } 919 idx = radeon_get_ib_value(p, p3reloc.idx + 1); 920 if (idx >= relocs_chunk->length_dw) { 921 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", 922 idx, relocs_chunk->length_dw); 923 return -EINVAL; 924 } 925 *cs_reloc = p->relocs; 926 (*cs_reloc)->lobj.gpu_offset = (u64)relocs_chunk->kdata[idx + 3] << 32; 927 (*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0]; 928 return 0; 929} 930 931/** 932 * r600_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc 933 * @parser: parser structure holding parsing context. 934 * 935 * Check next packet is relocation packet3, do bo validation and compute 936 * GPU offset using the provided start. 
937 **/ 938static int r600_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p) 939{ 940 struct radeon_cs_packet p3reloc; 941 int r; 942 943 r = r600_cs_packet_parse(p, &p3reloc, p->idx); 944 if (r) { 945 return 0; 946 } 947 if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) { 948 return 0; 949 } 950 return 1; 951} 952 953/** 954 * r600_cs_packet_next_vline() - parse userspace VLINE packet 955 * @parser: parser structure holding parsing context. 956 * 957 * Userspace sends a special sequence for VLINE waits. 958 * PACKET0 - VLINE_START_END + value 959 * PACKET3 - WAIT_REG_MEM poll vline status reg 960 * RELOC (P3) - crtc_id in reloc. 961 * 962 * This function parses this and relocates the VLINE START END 963 * and WAIT_REG_MEM packets to the correct crtc. 964 * It also detects a switched off crtc and nulls out the 965 * wait in that case. 966 */ 967static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p) 968{ 969 struct drm_mode_object *obj; 970 struct drm_crtc *crtc; 971 struct radeon_crtc *radeon_crtc; 972 struct radeon_cs_packet p3reloc, wait_reg_mem; 973 int crtc_id; 974 int r; 975 uint32_t header, h_idx, reg, wait_reg_mem_info; 976 volatile uint32_t *ib; 977 978 ib = p->ib.ptr; 979 980 /* parse the WAIT_REG_MEM */ 981 r = r600_cs_packet_parse(p, &wait_reg_mem, p->idx); 982 if (r) 983 return r; 984 985 /* check its a WAIT_REG_MEM */ 986 if (wait_reg_mem.type != PACKET_TYPE3 || 987 wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) { 988 DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n"); 989 return -EINVAL; 990 } 991 992 wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1); 993 /* bit 4 is reg (0) or mem (1) */ 994 if (wait_reg_mem_info & 0x10) { 995 DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n"); 996 return -EINVAL; 997 } 998 /* waiting for value to be equal */ 999 if ((wait_reg_mem_info & 0x7) != 0x3) { 1000 DRM_ERROR("vline WAIT_REG_MEM function not equal\n"); 1001 return -EINVAL; 1002 } 1003 if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != AVIVO_D1MODE_VLINE_STATUS) { 1004 DRM_ERROR("vline WAIT_REG_MEM bad reg\n"); 1005 return -EINVAL; 1006 } 1007 1008 if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != AVIVO_D1MODE_VLINE_STAT) { 1009 DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n"); 1010 return -EINVAL; 1011 } 1012 1013 /* jump over the NOP */ 1014 r = r600_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2); 1015 if (r) 1016 return r; 1017 1018 h_idx = p->idx - 2; 1019 p->idx += wait_reg_mem.count + 2; 1020 p->idx += p3reloc.count + 2; 1021 1022 header = radeon_get_ib_value(p, h_idx); 1023 crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1); 1024 reg = CP_PACKET0_GET_REG(header); 1025 1026 obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); 1027 if (!obj) { 1028 DRM_ERROR("cannot find crtc %d\n", crtc_id); 1029 return -EINVAL; 1030 } 1031 crtc = obj_to_crtc(obj); 1032 radeon_crtc = to_radeon_crtc(crtc); 1033 crtc_id = radeon_crtc->crtc_id; 1034 1035 if (!crtc->enabled) { 1036 /* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */ 1037 ib[h_idx + 2] = PACKET2(0); 1038 ib[h_idx + 3] = PACKET2(0); 1039 ib[h_idx + 4] = PACKET2(0); 1040 ib[h_idx + 5] = PACKET2(0); 1041 ib[h_idx + 6] = PACKET2(0); 1042 ib[h_idx + 7] = PACKET2(0); 1043 ib[h_idx + 8] = PACKET2(0); 1044 } else if (crtc_id == 1) { 1045 switch (reg) { 1046 case AVIVO_D1MODE_VLINE_START_END: 1047 header &= ~R600_CP_PACKET0_REG_MASK; 1048 header |= AVIVO_D2MODE_VLINE_START_END >> 2; 1049 break; 1050 default: 1051 
DRM_ERROR("unknown crtc reloc\n"); 1052 return -EINVAL; 1053 } 1054 ib[h_idx] = header; 1055 ib[h_idx + 4] = AVIVO_D2MODE_VLINE_STATUS >> 2; 1056 } 1057 1058 return 0; 1059} 1060 1061static int r600_packet0_check(struct radeon_cs_parser *p, 1062 struct radeon_cs_packet *pkt, 1063 unsigned idx, unsigned reg) 1064{ 1065 int r; 1066 1067 switch (reg) { 1068 case AVIVO_D1MODE_VLINE_START_END: 1069 r = r600_cs_packet_parse_vline(p); 1070 if (r) { 1071 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1072 idx, reg); 1073 return r; 1074 } 1075 break; 1076 default: 1077 DRM_ERROR("Forbidden register 0x%04X in cs at %d\n", 1078 reg, idx); 1079 return -EINVAL; 1080 } 1081 return 0; 1082} 1083 1084static int r600_cs_parse_packet0(struct radeon_cs_parser *p, 1085 struct radeon_cs_packet *pkt) 1086{ 1087 unsigned reg, i; 1088 unsigned idx; 1089 int r; 1090 1091 idx = pkt->idx + 1; 1092 reg = pkt->reg; 1093 for (i = 0; i <= pkt->count; i++, idx++, reg += 4) { 1094 r = r600_packet0_check(p, pkt, idx, reg); 1095 if (r) { 1096 return r; 1097 } 1098 } 1099 return 0; 1100} 1101 1102/** 1103 * r600_cs_check_reg() - check if register is authorized or not 1104 * @parser: parser structure holding parsing context 1105 * @reg: register we are testing 1106 * @idx: index into the cs buffer 1107 * 1108 * This function will test against r600_reg_safe_bm and return 0 1109 * if register is safe. If register is not flag as safe this function 1110 * will test it against a list of register needind special handling. 1111 */ 1112static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) 1113{ 1114 struct r600_cs_track *track = (struct r600_cs_track *)p->track; 1115 struct radeon_cs_reloc *reloc; 1116 u32 m, i, tmp, *ib; 1117 int r; 1118 1119 i = (reg >> 7); 1120 if (i >= DRM_ARRAY_SIZE(r600_reg_safe_bm)) { 1121 dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); 1122 return -EINVAL; 1123 } 1124 m = 1 << ((reg >> 2) & 31); 1125 if (!(r600_reg_safe_bm[i] & m)) 1126 return 0; 1127 ib = p->ib.ptr; 1128 switch (reg) { 1129 /* force following reg to 0 in an attempt to disable out buffer 1130 * which will need us to better understand how it works to perform 1131 * security check on it (Jerome) 1132 */ 1133 case R_0288A8_SQ_ESGS_RING_ITEMSIZE: 1134 case R_008C44_SQ_ESGS_RING_SIZE: 1135 case R_0288B0_SQ_ESTMP_RING_ITEMSIZE: 1136 case R_008C54_SQ_ESTMP_RING_SIZE: 1137 case R_0288C0_SQ_FBUF_RING_ITEMSIZE: 1138 case R_008C74_SQ_FBUF_RING_SIZE: 1139 case R_0288B4_SQ_GSTMP_RING_ITEMSIZE: 1140 case R_008C5C_SQ_GSTMP_RING_SIZE: 1141 case R_0288AC_SQ_GSVS_RING_ITEMSIZE: 1142 case R_008C4C_SQ_GSVS_RING_SIZE: 1143 case R_0288BC_SQ_PSTMP_RING_ITEMSIZE: 1144 case R_008C6C_SQ_PSTMP_RING_SIZE: 1145 case R_0288C4_SQ_REDUC_RING_ITEMSIZE: 1146 case R_008C7C_SQ_REDUC_RING_SIZE: 1147 case R_0288B8_SQ_VSTMP_RING_ITEMSIZE: 1148 case R_008C64_SQ_VSTMP_RING_SIZE: 1149 case R_0288C8_SQ_GS_VERT_ITEMSIZE: 1150 /* get value to populate the IB don't remove */ 1151 tmp =radeon_get_ib_value(p, idx); 1152 ib[idx] = 0; 1153 break; 1154 case SQ_CONFIG: 1155 track->sq_config = radeon_get_ib_value(p, idx); 1156 break; 1157 case R_028800_DB_DEPTH_CONTROL: 1158 track->db_depth_control = radeon_get_ib_value(p, idx); 1159 track->db_dirty = true; 1160 break; 1161 case R_028010_DB_DEPTH_INFO: 1162 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) && 1163 r600_cs_packet_next_is_pkt3_nop(p)) { 1164 r = r600_cs_packet_next_reloc(p, &reloc); 1165 if (r) { 1166 dev_warn(p->dev, "bad SET_CONTEXT_REG " 1167 "0x%04X\n", reg); 1168 return -EINVAL; 1169 } 1170 
track->db_depth_info = radeon_get_ib_value(p, idx); 1171 ib[idx] &= C_028010_ARRAY_MODE; 1172 track->db_depth_info &= C_028010_ARRAY_MODE; 1173 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { 1174 ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1); 1175 track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1); 1176 } else { 1177 ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1); 1178 track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1); 1179 } 1180 } else { 1181 track->db_depth_info = radeon_get_ib_value(p, idx); 1182 } 1183 track->db_dirty = true; 1184 break; 1185 case R_028004_DB_DEPTH_VIEW: 1186 track->db_depth_view = radeon_get_ib_value(p, idx); 1187 track->db_dirty = true; 1188 break; 1189 case R_028000_DB_DEPTH_SIZE: 1190 track->db_depth_size = radeon_get_ib_value(p, idx); 1191 track->db_depth_size_idx = idx; 1192 track->db_dirty = true; 1193 break; 1194 case R_028AB0_VGT_STRMOUT_EN: 1195 track->vgt_strmout_en = radeon_get_ib_value(p, idx); 1196 track->streamout_dirty = true; 1197 break; 1198 case R_028B20_VGT_STRMOUT_BUFFER_EN: 1199 track->vgt_strmout_buffer_en = radeon_get_ib_value(p, idx); 1200 track->streamout_dirty = true; 1201 break; 1202 case VGT_STRMOUT_BUFFER_BASE_0: 1203 case VGT_STRMOUT_BUFFER_BASE_1: 1204 case VGT_STRMOUT_BUFFER_BASE_2: 1205 case VGT_STRMOUT_BUFFER_BASE_3: 1206 r = r600_cs_packet_next_reloc(p, &reloc); 1207 if (r) { 1208 dev_warn(p->dev, "bad SET_CONTEXT_REG " 1209 "0x%04X\n", reg); 1210 return -EINVAL; 1211 } 1212 tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16; 1213 track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8; 1214 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1215 track->vgt_strmout_bo[tmp] = reloc->robj; 1216 track->vgt_strmout_bo_mc[tmp] = reloc->lobj.gpu_offset; 1217 track->streamout_dirty = true; 1218 break; 1219 case VGT_STRMOUT_BUFFER_SIZE_0: 1220 case VGT_STRMOUT_BUFFER_SIZE_1: 1221 case VGT_STRMOUT_BUFFER_SIZE_2: 1222 case VGT_STRMOUT_BUFFER_SIZE_3: 1223 tmp = (reg - VGT_STRMOUT_BUFFER_SIZE_0) / 16; 1224 /* size in register is DWs, convert to bytes */ 1225 track->vgt_strmout_size[tmp] = radeon_get_ib_value(p, idx) * 4; 1226 track->streamout_dirty = true; 1227 break; 1228 case CP_COHER_BASE: 1229 r = r600_cs_packet_next_reloc(p, &reloc); 1230 if (r) { 1231 dev_warn(p->dev, "missing reloc for CP_COHER_BASE " 1232 "0x%04X\n", reg); 1233 return -EINVAL; 1234 } 1235 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1236 break; 1237 case R_028238_CB_TARGET_MASK: 1238 track->cb_target_mask = radeon_get_ib_value(p, idx); 1239 track->cb_dirty = true; 1240 break; 1241 case R_02823C_CB_SHADER_MASK: 1242 track->cb_shader_mask = radeon_get_ib_value(p, idx); 1243 break; 1244 case R_028C04_PA_SC_AA_CONFIG: 1245 tmp = G_028C04_MSAA_NUM_SAMPLES(radeon_get_ib_value(p, idx)); 1246 track->log_nsamples = tmp; 1247 track->nsamples = 1 << tmp; 1248 track->cb_dirty = true; 1249 break; 1250 case R_028808_CB_COLOR_CONTROL: 1251 tmp = G_028808_SPECIAL_OP(radeon_get_ib_value(p, idx)); 1252 track->is_resolve = tmp == V_028808_SPECIAL_RESOLVE_BOX; 1253 track->cb_dirty = true; 1254 break; 1255 case R_0280A0_CB_COLOR0_INFO: 1256 case R_0280A4_CB_COLOR1_INFO: 1257 case R_0280A8_CB_COLOR2_INFO: 1258 case R_0280AC_CB_COLOR3_INFO: 1259 case R_0280B0_CB_COLOR4_INFO: 1260 case R_0280B4_CB_COLOR5_INFO: 1261 case R_0280B8_CB_COLOR6_INFO: 1262 case R_0280BC_CB_COLOR7_INFO: 1263 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) && 1264 
r600_cs_packet_next_is_pkt3_nop(p)) { 1265 r = r600_cs_packet_next_reloc(p, &reloc); 1266 if (r) { 1267 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); 1268 return -EINVAL; 1269 } 1270 tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4; 1271 track->cb_color_info[tmp] = radeon_get_ib_value(p, idx); 1272 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { 1273 ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1); 1274 track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1); 1275 } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { 1276 ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1); 1277 track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1); 1278 } 1279 } else { 1280 tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4; 1281 track->cb_color_info[tmp] = radeon_get_ib_value(p, idx); 1282 } 1283 track->cb_dirty = true; 1284 break; 1285 case R_028080_CB_COLOR0_VIEW: 1286 case R_028084_CB_COLOR1_VIEW: 1287 case R_028088_CB_COLOR2_VIEW: 1288 case R_02808C_CB_COLOR3_VIEW: 1289 case R_028090_CB_COLOR4_VIEW: 1290 case R_028094_CB_COLOR5_VIEW: 1291 case R_028098_CB_COLOR6_VIEW: 1292 case R_02809C_CB_COLOR7_VIEW: 1293 tmp = (reg - R_028080_CB_COLOR0_VIEW) / 4; 1294 track->cb_color_view[tmp] = radeon_get_ib_value(p, idx); 1295 track->cb_dirty = true; 1296 break; 1297 case R_028060_CB_COLOR0_SIZE: 1298 case R_028064_CB_COLOR1_SIZE: 1299 case R_028068_CB_COLOR2_SIZE: 1300 case R_02806C_CB_COLOR3_SIZE: 1301 case R_028070_CB_COLOR4_SIZE: 1302 case R_028074_CB_COLOR5_SIZE: 1303 case R_028078_CB_COLOR6_SIZE: 1304 case R_02807C_CB_COLOR7_SIZE: 1305 tmp = (reg - R_028060_CB_COLOR0_SIZE) / 4; 1306 track->cb_color_size[tmp] = radeon_get_ib_value(p, idx); 1307 track->cb_color_size_idx[tmp] = idx; 1308 track->cb_dirty = true; 1309 break; 1310 /* This register were added late, there is userspace 1311 * which does provide relocation for those but set 1312 * 0 offset. In order to avoid breaking old userspace 1313 * we detect this and set address to point to last 1314 * CB_COLOR0_BASE, note that if userspace doesn't set 1315 * CB_COLOR0_BASE before this register we will report 1316 * error. Old userspace always set CB_COLOR0_BASE 1317 * before any of this. 1318 */ 1319 case R_0280E0_CB_COLOR0_FRAG: 1320 case R_0280E4_CB_COLOR1_FRAG: 1321 case R_0280E8_CB_COLOR2_FRAG: 1322 case R_0280EC_CB_COLOR3_FRAG: 1323 case R_0280F0_CB_COLOR4_FRAG: 1324 case R_0280F4_CB_COLOR5_FRAG: 1325 case R_0280F8_CB_COLOR6_FRAG: 1326 case R_0280FC_CB_COLOR7_FRAG: 1327 tmp = (reg - R_0280E0_CB_COLOR0_FRAG) / 4; 1328 if (!r600_cs_packet_next_is_pkt3_nop(p)) { 1329 if (!track->cb_color_base_last[tmp]) { 1330 dev_err(p->dev, "Broken old userspace ? 
no cb_color0_base supplied before trying to write 0x%08X\n", reg); 1331 return -EINVAL; 1332 } 1333 track->cb_color_frag_bo[tmp] = track->cb_color_bo[tmp]; 1334 track->cb_color_frag_offset[tmp] = track->cb_color_bo_offset[tmp]; 1335 ib[idx] = track->cb_color_base_last[tmp]; 1336 } else { 1337 r = r600_cs_packet_next_reloc(p, &reloc); 1338 if (r) { 1339 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); 1340 return -EINVAL; 1341 } 1342 track->cb_color_frag_bo[tmp] = reloc->robj; 1343 track->cb_color_frag_offset[tmp] = (u64)ib[idx] << 8; 1344 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1345 } 1346 if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) { 1347 track->cb_dirty = true; 1348 } 1349 break; 1350 case R_0280C0_CB_COLOR0_TILE: 1351 case R_0280C4_CB_COLOR1_TILE: 1352 case R_0280C8_CB_COLOR2_TILE: 1353 case R_0280CC_CB_COLOR3_TILE: 1354 case R_0280D0_CB_COLOR4_TILE: 1355 case R_0280D4_CB_COLOR5_TILE: 1356 case R_0280D8_CB_COLOR6_TILE: 1357 case R_0280DC_CB_COLOR7_TILE: 1358 tmp = (reg - R_0280C0_CB_COLOR0_TILE) / 4; 1359 if (!r600_cs_packet_next_is_pkt3_nop(p)) { 1360 if (!track->cb_color_base_last[tmp]) { 1361 dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg); 1362 return -EINVAL; 1363 } 1364 track->cb_color_tile_bo[tmp] = track->cb_color_bo[tmp]; 1365 track->cb_color_tile_offset[tmp] = track->cb_color_bo_offset[tmp]; 1366 ib[idx] = track->cb_color_base_last[tmp]; 1367 } else { 1368 r = r600_cs_packet_next_reloc(p, &reloc); 1369 if (r) { 1370 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); 1371 return -EINVAL; 1372 } 1373 track->cb_color_tile_bo[tmp] = reloc->robj; 1374 track->cb_color_tile_offset[tmp] = (u64)ib[idx] << 8; 1375 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1376 } 1377 if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) { 1378 track->cb_dirty = true; 1379 } 1380 break; 1381 case R_028100_CB_COLOR0_MASK: 1382 case R_028104_CB_COLOR1_MASK: 1383 case R_028108_CB_COLOR2_MASK: 1384 case R_02810C_CB_COLOR3_MASK: 1385 case R_028110_CB_COLOR4_MASK: 1386 case R_028114_CB_COLOR5_MASK: 1387 case R_028118_CB_COLOR6_MASK: 1388 case R_02811C_CB_COLOR7_MASK: 1389 tmp = (reg - R_028100_CB_COLOR0_MASK) / 4; 1390 track->cb_color_mask[tmp] = radeon_get_ib_value(p, idx); 1391 if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) { 1392 track->cb_dirty = true; 1393 } 1394 break; 1395 case CB_COLOR0_BASE: 1396 case CB_COLOR1_BASE: 1397 case CB_COLOR2_BASE: 1398 case CB_COLOR3_BASE: 1399 case CB_COLOR4_BASE: 1400 case CB_COLOR5_BASE: 1401 case CB_COLOR6_BASE: 1402 case CB_COLOR7_BASE: 1403 r = r600_cs_packet_next_reloc(p, &reloc); 1404 if (r) { 1405 dev_warn(p->dev, "bad SET_CONTEXT_REG " 1406 "0x%04X\n", reg); 1407 return -EINVAL; 1408 } 1409 tmp = (reg - CB_COLOR0_BASE) / 4; 1410 track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8; 1411 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1412 track->cb_color_base_last[tmp] = ib[idx]; 1413 track->cb_color_bo[tmp] = reloc->robj; 1414 track->cb_color_bo_mc[tmp] = reloc->lobj.gpu_offset; 1415 track->cb_dirty = true; 1416 break; 1417 case DB_DEPTH_BASE: 1418 r = r600_cs_packet_next_reloc(p, &reloc); 1419 if (r) { 1420 dev_warn(p->dev, "bad SET_CONTEXT_REG " 1421 "0x%04X\n", reg); 1422 return -EINVAL; 1423 } 1424 track->db_offset = radeon_get_ib_value(p, idx) << 8; 1425 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1426 track->db_bo = reloc->robj; 1427 track->db_bo_mc = reloc->lobj.gpu_offset; 1428 track->db_dirty = 
true; 1429 break; 1430 case DB_HTILE_DATA_BASE: 1431 r = r600_cs_packet_next_reloc(p, &reloc); 1432 if (r) { 1433 dev_warn(p->dev, "bad SET_CONTEXT_REG " 1434 "0x%04X\n", reg); 1435 return -EINVAL; 1436 } 1437 track->htile_offset = radeon_get_ib_value(p, idx) << 8; 1438 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1439 track->htile_bo = reloc->robj; 1440 track->db_dirty = true; 1441 break; 1442 case DB_HTILE_SURFACE: 1443 track->htile_surface = radeon_get_ib_value(p, idx); 1444 /* force 8x8 htile width and height */ 1445 ib[idx] |= 3; 1446 track->db_dirty = true; 1447 break; 1448 case SQ_PGM_START_FS: 1449 case SQ_PGM_START_ES: 1450 case SQ_PGM_START_VS: 1451 case SQ_PGM_START_GS: 1452 case SQ_PGM_START_PS: 1453 case SQ_ALU_CONST_CACHE_GS_0: 1454 case SQ_ALU_CONST_CACHE_GS_1: 1455 case SQ_ALU_CONST_CACHE_GS_2: 1456 case SQ_ALU_CONST_CACHE_GS_3: 1457 case SQ_ALU_CONST_CACHE_GS_4: 1458 case SQ_ALU_CONST_CACHE_GS_5: 1459 case SQ_ALU_CONST_CACHE_GS_6: 1460 case SQ_ALU_CONST_CACHE_GS_7: 1461 case SQ_ALU_CONST_CACHE_GS_8: 1462 case SQ_ALU_CONST_CACHE_GS_9: 1463 case SQ_ALU_CONST_CACHE_GS_10: 1464 case SQ_ALU_CONST_CACHE_GS_11: 1465 case SQ_ALU_CONST_CACHE_GS_12: 1466 case SQ_ALU_CONST_CACHE_GS_13: 1467 case SQ_ALU_CONST_CACHE_GS_14: 1468 case SQ_ALU_CONST_CACHE_GS_15: 1469 case SQ_ALU_CONST_CACHE_PS_0: 1470 case SQ_ALU_CONST_CACHE_PS_1: 1471 case SQ_ALU_CONST_CACHE_PS_2: 1472 case SQ_ALU_CONST_CACHE_PS_3: 1473 case SQ_ALU_CONST_CACHE_PS_4: 1474 case SQ_ALU_CONST_CACHE_PS_5: 1475 case SQ_ALU_CONST_CACHE_PS_6: 1476 case SQ_ALU_CONST_CACHE_PS_7: 1477 case SQ_ALU_CONST_CACHE_PS_8: 1478 case SQ_ALU_CONST_CACHE_PS_9: 1479 case SQ_ALU_CONST_CACHE_PS_10: 1480 case SQ_ALU_CONST_CACHE_PS_11: 1481 case SQ_ALU_CONST_CACHE_PS_12: 1482 case SQ_ALU_CONST_CACHE_PS_13: 1483 case SQ_ALU_CONST_CACHE_PS_14: 1484 case SQ_ALU_CONST_CACHE_PS_15: 1485 case SQ_ALU_CONST_CACHE_VS_0: 1486 case SQ_ALU_CONST_CACHE_VS_1: 1487 case SQ_ALU_CONST_CACHE_VS_2: 1488 case SQ_ALU_CONST_CACHE_VS_3: 1489 case SQ_ALU_CONST_CACHE_VS_4: 1490 case SQ_ALU_CONST_CACHE_VS_5: 1491 case SQ_ALU_CONST_CACHE_VS_6: 1492 case SQ_ALU_CONST_CACHE_VS_7: 1493 case SQ_ALU_CONST_CACHE_VS_8: 1494 case SQ_ALU_CONST_CACHE_VS_9: 1495 case SQ_ALU_CONST_CACHE_VS_10: 1496 case SQ_ALU_CONST_CACHE_VS_11: 1497 case SQ_ALU_CONST_CACHE_VS_12: 1498 case SQ_ALU_CONST_CACHE_VS_13: 1499 case SQ_ALU_CONST_CACHE_VS_14: 1500 case SQ_ALU_CONST_CACHE_VS_15: 1501 r = r600_cs_packet_next_reloc(p, &reloc); 1502 if (r) { 1503 dev_warn(p->dev, "bad SET_CONTEXT_REG " 1504 "0x%04X\n", reg); 1505 return -EINVAL; 1506 } 1507 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1508 break; 1509 case SX_MEMORY_EXPORT_BASE: 1510 r = r600_cs_packet_next_reloc(p, &reloc); 1511 if (r) { 1512 dev_warn(p->dev, "bad SET_CONFIG_REG " 1513 "0x%04X\n", reg); 1514 return -EINVAL; 1515 } 1516 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1517 break; 1518 case SX_MISC: 1519 track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0; 1520 break; 1521 default: 1522 dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); 1523 return -EINVAL; 1524 } 1525 return 0; 1526} 1527 1528unsigned r600_mip_minify(unsigned size, unsigned level) 1529{ 1530 unsigned val; 1531 1532 val = max(1U, size >> level); 1533 if (level > 0) 1534 val = roundup_pow_of_two(val); 1535 return val; 1536} 1537 1538static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel, 1539 unsigned w0, unsigned h0, unsigned d0, unsigned nsamples, unsigned 
format, 1540 unsigned block_align, unsigned height_align, unsigned base_align, 1541 unsigned *l0_size, unsigned *mipmap_size) 1542{ 1543 unsigned offset, i, level; 1544 unsigned width, height, depth, size; 1545 unsigned blocksize; 1546 unsigned nbx, nby; 1547 unsigned nlevels = llevel - blevel + 1; 1548 1549 *l0_size = -1; 1550 blocksize = r600_fmt_get_blocksize(format); 1551 1552 w0 = r600_mip_minify(w0, 0); 1553 h0 = r600_mip_minify(h0, 0); 1554 d0 = r600_mip_minify(d0, 0); 1555 for(i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) { 1556 width = r600_mip_minify(w0, i); 1557 nbx = r600_fmt_get_nblocksx(format, width); 1558 1559 nbx = roundup(nbx, block_align); 1560 1561 height = r600_mip_minify(h0, i); 1562 nby = r600_fmt_get_nblocksy(format, height); 1563 nby = roundup(nby, height_align); 1564 1565 depth = r600_mip_minify(d0, i); 1566 1567 size = nbx * nby * blocksize * nsamples; 1568 if (nfaces) 1569 size *= nfaces; 1570 else 1571 size *= depth; 1572 1573 if (i == 0) 1574 *l0_size = size; 1575 1576 if (i == 0 || i == 1) 1577 offset = roundup(offset, base_align); 1578 1579 offset += size; 1580 } 1581 *mipmap_size = offset; 1582 if (llevel == 0) 1583 *mipmap_size = *l0_size; 1584 if (!blevel) 1585 *mipmap_size -= *l0_size; 1586} 1587 1588/** 1589 * r600_check_texture_resource() - check if register is authorized or not 1590 * @p: parser structure holding parsing context 1591 * @idx: index into the cs buffer 1592 * @texture: texture's bo structure 1593 * @mipmap: mipmap's bo structure 1594 * 1595 * This function will check that the resource has valid field and that 1596 * the texture and mipmap bo object are big enough to cover this resource. 1597 */ 1598static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx, 1599 struct radeon_bo *texture, 1600 struct radeon_bo *mipmap, 1601 u64 base_offset, 1602 u64 mip_offset, 1603 u32 tiling_flags) 1604{ 1605 struct r600_cs_track *track = p->track; 1606 u32 dim, nfaces, llevel, blevel, w0, h0, d0; 1607 u32 word0, word1, l0_size, mipmap_size, word2, word3, word4, word5; 1608 u32 height_align, pitch, pitch_align, depth_align; 1609 u32 barray, larray; 1610 u64 base_align; 1611 struct array_mode_checker array_check; 1612 u32 format; 1613 bool is_array; 1614 1615 /* on legacy kernel we don't perform advanced check */ 1616 if (p->rdev == NULL) 1617 return 0; 1618 1619 /* convert to bytes */ 1620 base_offset <<= 8; 1621 mip_offset <<= 8; 1622 1623 word0 = radeon_get_ib_value(p, idx + 0); 1624 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { 1625 if (tiling_flags & RADEON_TILING_MACRO) 1626 word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1); 1627 else if (tiling_flags & RADEON_TILING_MICRO) 1628 word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1); 1629 } 1630 word1 = radeon_get_ib_value(p, idx + 1); 1631 word2 = radeon_get_ib_value(p, idx + 2) << 8; 1632 word3 = radeon_get_ib_value(p, idx + 3) << 8; 1633 word4 = radeon_get_ib_value(p, idx + 4); 1634 word5 = radeon_get_ib_value(p, idx + 5); 1635 dim = G_038000_DIM(word0); 1636 w0 = G_038000_TEX_WIDTH(word0) + 1; 1637 pitch = (G_038000_PITCH(word0) + 1) * 8; 1638 h0 = G_038004_TEX_HEIGHT(word1) + 1; 1639 d0 = G_038004_TEX_DEPTH(word1); 1640 format = G_038004_DATA_FORMAT(word1); 1641 blevel = G_038010_BASE_LEVEL(word4); 1642 llevel = G_038014_LAST_LEVEL(word5); 1643 /* pitch in texels */ 1644 array_check.array_mode = G_038000_TILE_MODE(word0); 1645 array_check.group_size = track->group_size; 1646 array_check.nbanks = track->nbanks; 1647 array_check.npipes = 
track->npipes; 1648 array_check.nsamples = 1; 1649 array_check.blocksize = r600_fmt_get_blocksize(format); 1650 nfaces = 1; 1651 is_array = false; 1652 switch (dim) { 1653 case V_038000_SQ_TEX_DIM_1D: 1654 case V_038000_SQ_TEX_DIM_2D: 1655 case V_038000_SQ_TEX_DIM_3D: 1656 break; 1657 case V_038000_SQ_TEX_DIM_CUBEMAP: 1658 if (p->family >= CHIP_RV770) 1659 nfaces = 8; 1660 else 1661 nfaces = 6; 1662 break; 1663 case V_038000_SQ_TEX_DIM_1D_ARRAY: 1664 case V_038000_SQ_TEX_DIM_2D_ARRAY: 1665 is_array = true; 1666 break; 1667 case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA: 1668 is_array = true; 1669 /* fall through */ 1670 case V_038000_SQ_TEX_DIM_2D_MSAA: 1671 array_check.nsamples = 1 << llevel; 1672 llevel = 0; 1673 break; 1674 default: 1675 dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0)); 1676 return -EINVAL; 1677 } 1678 if (!r600_fmt_is_valid_texture(format, p->family)) { 1679 dev_warn(p->dev, "%s:%d texture invalid format %d\n", 1680 __func__, __LINE__, format); 1681 return -EINVAL; 1682 } 1683 1684 if (r600_get_array_mode_alignment(&array_check, 1685 &pitch_align, &height_align, &depth_align, &base_align)) { 1686 dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n", 1687 __func__, __LINE__, G_038000_TILE_MODE(word0)); 1688 return -EINVAL; 1689 } 1690 1691 /* XXX check height as well... */ 1692 1693 if (!IS_ALIGNED(pitch, pitch_align)) { 1694 dev_warn(p->dev, "%s:%d tex pitch (%d, 0x%x, %d) invalid\n", 1695 __func__, __LINE__, pitch, pitch_align, G_038000_TILE_MODE(word0)); 1696 return -EINVAL; 1697 } 1698 if (!IS_ALIGNED(base_offset, base_align)) { 1699 dev_warn(p->dev, "%s:%d tex base offset (0x%jx, 0x%jx, %d) invalid\n", 1700 __func__, __LINE__, (uintmax_t)base_offset, (uintmax_t)base_align, G_038000_TILE_MODE(word0)); 1701 return -EINVAL; 1702 } 1703 if (!IS_ALIGNED(mip_offset, base_align)) { 1704 dev_warn(p->dev, "%s:%d tex mip offset (0x%jx, 0x%jx, %d) invalid\n", 1705 __func__, __LINE__, (uintmax_t)mip_offset, (uintmax_t)base_align, G_038000_TILE_MODE(word0)); 1706 return -EINVAL; 1707 } 1708 1709 if (blevel > llevel) { 1710 dev_warn(p->dev, "texture blevel %d > llevel %d\n", 1711 blevel, llevel); 1712 } 1713 if (is_array) { 1714 barray = G_038014_BASE_ARRAY(word5); 1715 larray = G_038014_LAST_ARRAY(word5); 1716 1717 nfaces = larray - barray + 1; 1718 } 1719 r600_texture_size(nfaces, blevel, llevel, w0, h0, d0, array_check.nsamples, format, 1720 pitch_align, height_align, base_align, 1721 &l0_size, &mipmap_size); 1722 /* using get ib will give us the offset into the texture bo */ 1723 if ((l0_size + word2) > radeon_bo_size(texture)) { 1724 dev_warn(p->dev, "texture bo too small ((%d %d) (%d %d) %d %d %d -> %d have %ld)\n", 1725 w0, h0, pitch_align, height_align, 1726 array_check.array_mode, format, word2, 1727 l0_size, radeon_bo_size(texture)); 1728 dev_warn(p->dev, "alignments %d %d %d %jd\n", pitch, pitch_align, height_align, (uintmax_t)base_align); 1729 return -EINVAL; 1730 } 1731 /* using get ib will give us the offset into the mipmap bo */ 1732 if ((mipmap_size + word3) > radeon_bo_size(mipmap)) { 1733 /*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n", 1734 w0, h0, format, blevel, nlevels, word3, mipmap_size, radeon_bo_size(texture));*/ 1735 } 1736 return 0; 1737} 1738 1739static bool r600_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) 1740{ 1741 u32 m, i; 1742 1743 i = (reg >> 7); 1744 if (i >= DRM_ARRAY_SIZE(r600_reg_safe_bm)) { 1745 dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); 1746 
return false; 1747 } 1748 m = 1 << ((reg >> 2) & 31); 1749 if (!(r600_reg_safe_bm[i] & m)) 1750 return true; 1751 dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); 1752 return false; 1753} 1754 1755static int r600_packet3_check(struct radeon_cs_parser *p, 1756 struct radeon_cs_packet *pkt) 1757{ 1758 struct radeon_cs_reloc *reloc; 1759 struct r600_cs_track *track; 1760 volatile u32 *ib; 1761 unsigned idx; 1762 unsigned i; 1763 unsigned start_reg, end_reg, reg; 1764 int r; 1765 u32 idx_value; 1766 1767 track = (struct r600_cs_track *)p->track; 1768 ib = p->ib.ptr; 1769 idx = pkt->idx + 1; 1770 idx_value = radeon_get_ib_value(p, idx); 1771 1772 switch (pkt->opcode) { 1773 case PACKET3_SET_PREDICATION: 1774 { 1775 int pred_op; 1776 int tmp; 1777 uint64_t offset; 1778 1779 if (pkt->count != 1) { 1780 DRM_ERROR("bad SET PREDICATION\n"); 1781 return -EINVAL; 1782 } 1783 1784 tmp = radeon_get_ib_value(p, idx + 1); 1785 pred_op = (tmp >> 16) & 0x7; 1786 1787 /* for the clear predicate operation */ 1788 if (pred_op == 0) 1789 return 0; 1790 1791 if (pred_op > 2) { 1792 DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op); 1793 return -EINVAL; 1794 } 1795 1796 r = r600_cs_packet_next_reloc(p, &reloc); 1797 if (r) { 1798 DRM_ERROR("bad SET PREDICATION\n"); 1799 return -EINVAL; 1800 } 1801 1802 offset = reloc->lobj.gpu_offset + 1803 (idx_value & 0xfffffff0) + 1804 ((u64)(tmp & 0xff) << 32); 1805 1806 ib[idx + 0] = offset; 1807 ib[idx + 1] = (tmp & 0xffffff00) | (upper_32_bits(offset) & 0xff); 1808 } 1809 break; 1810 1811 case PACKET3_START_3D_CMDBUF: 1812 if (p->family >= CHIP_RV770 || pkt->count) { 1813 DRM_ERROR("bad START_3D\n"); 1814 return -EINVAL; 1815 } 1816 break; 1817 case PACKET3_CONTEXT_CONTROL: 1818 if (pkt->count != 1) { 1819 DRM_ERROR("bad CONTEXT_CONTROL\n"); 1820 return -EINVAL; 1821 } 1822 break; 1823 case PACKET3_INDEX_TYPE: 1824 case PACKET3_NUM_INSTANCES: 1825 if (pkt->count) { 1826 DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES\n"); 1827 return -EINVAL; 1828 } 1829 break; 1830 case PACKET3_DRAW_INDEX: 1831 { 1832 uint64_t offset; 1833 if (pkt->count != 3) { 1834 DRM_ERROR("bad DRAW_INDEX\n"); 1835 return -EINVAL; 1836 } 1837 r = r600_cs_packet_next_reloc(p, &reloc); 1838 if (r) { 1839 DRM_ERROR("bad DRAW_INDEX\n"); 1840 return -EINVAL; 1841 } 1842 1843 offset = reloc->lobj.gpu_offset + 1844 idx_value + 1845 ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32); 1846 1847 ib[idx+0] = offset; 1848 ib[idx+1] = upper_32_bits(offset) & 0xff; 1849 1850 r = r600_cs_track_check(p); 1851 if (r) { 1852 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); 1853 return r; 1854 } 1855 break; 1856 } 1857 case PACKET3_DRAW_INDEX_AUTO: 1858 if (pkt->count != 1) { 1859 DRM_ERROR("bad DRAW_INDEX_AUTO\n"); 1860 return -EINVAL; 1861 } 1862 r = r600_cs_track_check(p); 1863 if (r) { 1864 dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx); 1865 return r; 1866 } 1867 break; 1868 case PACKET3_DRAW_INDEX_IMMD_BE: 1869 case PACKET3_DRAW_INDEX_IMMD: 1870 if (pkt->count < 2) { 1871 DRM_ERROR("bad DRAW_INDEX_IMMD\n"); 1872 return -EINVAL; 1873 } 1874 r = r600_cs_track_check(p); 1875 if (r) { 1876 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); 1877 return r; 1878 } 1879 break; 1880 case PACKET3_WAIT_REG_MEM: 1881 if (pkt->count != 5) { 1882 DRM_ERROR("bad WAIT_REG_MEM\n"); 1883 return -EINVAL; 1884 } 1885 /* bit 4 is reg (0) or mem (1) */ 1886 if (idx_value & 0x10) { 1887 uint64_t offset; 1888 1889 r = r600_cs_packet_next_reloc(p, &reloc); 
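	/*
	 * When polling memory, the packet carries a 64-bit GPU address split
	 * across two dwords.  It is rebased onto the reloc'd buffer below,
	 * keeping the low dword 16-byte aligned and preserving the two low
	 * control bits already present in ib[idx+1].
	 */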
1890 if (r) { 1891 DRM_ERROR("bad WAIT_REG_MEM\n"); 1892 return -EINVAL; 1893 } 1894 1895 offset = reloc->lobj.gpu_offset + 1896 (radeon_get_ib_value(p, idx+1) & 0xfffffff0) + 1897 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); 1898 1899 ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffff0); 1900 ib[idx+2] = upper_32_bits(offset) & 0xff; 1901 } 1902 break; 1903 case PACKET3_CP_DMA: 1904 { 1905 u32 command, size; 1906 u64 offset, tmp; 1907 if (pkt->count != 4) { 1908 DRM_ERROR("bad CP DMA\n"); 1909 return -EINVAL; 1910 } 1911 command = radeon_get_ib_value(p, idx+4); 1912 size = command & 0x1fffff; 1913 if (command & PACKET3_CP_DMA_CMD_SAS) { 1914 /* src address space is register */ 1915 DRM_ERROR("CP DMA SAS not supported\n"); 1916 return -EINVAL; 1917 } else { 1918 if (command & PACKET3_CP_DMA_CMD_SAIC) { 1919 DRM_ERROR("CP DMA SAIC only supported for registers\n"); 1920 return -EINVAL; 1921 } 1922 /* src address space is memory */ 1923 r = r600_cs_packet_next_reloc(p, &reloc); 1924 if (r) { 1925 DRM_ERROR("bad CP DMA SRC\n"); 1926 return -EINVAL; 1927 } 1928 1929 tmp = radeon_get_ib_value(p, idx) + 1930 ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32); 1931 1932 offset = reloc->lobj.gpu_offset + tmp; 1933 1934 if ((tmp + size) > radeon_bo_size(reloc->robj)) { 1935 dev_warn(p->dev, "CP DMA src buffer too small (%ju %lu)\n", 1936 (uintmax_t)tmp + size, radeon_bo_size(reloc->robj)); 1937 return -EINVAL; 1938 } 1939 1940 ib[idx] = offset; 1941 ib[idx+1] = (ib[idx+1] & 0xffffff00) | (upper_32_bits(offset) & 0xff); 1942 } 1943 if (command & PACKET3_CP_DMA_CMD_DAS) { 1944 /* dst address space is register */ 1945 DRM_ERROR("CP DMA DAS not supported\n"); 1946 return -EINVAL; 1947 } else { 1948 /* dst address space is memory */ 1949 if (command & PACKET3_CP_DMA_CMD_DAIC) { 1950 DRM_ERROR("CP DMA DAIC only supported for registers\n"); 1951 return -EINVAL; 1952 } 1953 r = r600_cs_packet_next_reloc(p, &reloc); 1954 if (r) { 1955 DRM_ERROR("bad CP DMA DST\n"); 1956 return -EINVAL; 1957 } 1958 1959 tmp = radeon_get_ib_value(p, idx+2) + 1960 ((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32); 1961 1962 offset = reloc->lobj.gpu_offset + tmp; 1963 1964 if ((tmp + size) > radeon_bo_size(reloc->robj)) { 1965 dev_warn(p->dev, "CP DMA dst buffer too small (%ju %lu)\n", 1966 (uintmax_t)tmp + size, radeon_bo_size(reloc->robj)); 1967 return -EINVAL; 1968 } 1969 1970 ib[idx+2] = offset; 1971 ib[idx+3] = upper_32_bits(offset) & 0xff; 1972 } 1973 break; 1974 } 1975 case PACKET3_SURFACE_SYNC: 1976 if (pkt->count != 3) { 1977 DRM_ERROR("bad SURFACE_SYNC\n"); 1978 return -EINVAL; 1979 } 1980 /* 0xffffffff/0x0 is flush all cache flag */ 1981 if (radeon_get_ib_value(p, idx + 1) != 0xffffffff || 1982 radeon_get_ib_value(p, idx + 2) != 0) { 1983 r = r600_cs_packet_next_reloc(p, &reloc); 1984 if (r) { 1985 DRM_ERROR("bad SURFACE_SYNC\n"); 1986 return -EINVAL; 1987 } 1988 ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1989 } 1990 break; 1991 case PACKET3_EVENT_WRITE: 1992 if (pkt->count != 2 && pkt->count != 0) { 1993 DRM_ERROR("bad EVENT_WRITE\n"); 1994 return -EINVAL; 1995 } 1996 if (pkt->count) { 1997 uint64_t offset; 1998 1999 r = r600_cs_packet_next_reloc(p, &reloc); 2000 if (r) { 2001 DRM_ERROR("bad EVENT_WRITE\n"); 2002 return -EINVAL; 2003 } 2004 offset = reloc->lobj.gpu_offset + 2005 (radeon_get_ib_value(p, idx+1) & 0xfffffff8) + 2006 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); 2007 2008 ib[idx+1] = offset & 0xfffffff8; 2009 ib[idx+2] = upper_32_bits(offset) & 0xff; 2010 } 2011 
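	/*
	 * An EVENT_WRITE with a zero count carries no address payload, so
	 * there is nothing to relocate for that form of the packet.
	 */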
break; 2012 case PACKET3_EVENT_WRITE_EOP: 2013 { 2014 uint64_t offset; 2015 2016 if (pkt->count != 4) { 2017 DRM_ERROR("bad EVENT_WRITE_EOP\n"); 2018 return -EINVAL; 2019 } 2020 r = r600_cs_packet_next_reloc(p, &reloc); 2021 if (r) { 2022 DRM_ERROR("bad EVENT_WRITE\n"); 2023 return -EINVAL; 2024 } 2025 2026 offset = reloc->lobj.gpu_offset + 2027 (radeon_get_ib_value(p, idx+1) & 0xfffffffc) + 2028 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); 2029 2030 ib[idx+1] = offset & 0xfffffffc; 2031 ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff); 2032 break; 2033 } 2034 case PACKET3_SET_CONFIG_REG: 2035 start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_OFFSET; 2036 end_reg = 4 * pkt->count + start_reg - 4; 2037 if ((start_reg < PACKET3_SET_CONFIG_REG_OFFSET) || 2038 (start_reg >= PACKET3_SET_CONFIG_REG_END) || 2039 (end_reg >= PACKET3_SET_CONFIG_REG_END)) { 2040 DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n"); 2041 return -EINVAL; 2042 } 2043 for (i = 0; i < pkt->count; i++) { 2044 reg = start_reg + (4 * i); 2045 r = r600_cs_check_reg(p, reg, idx+1+i); 2046 if (r) 2047 return r; 2048 } 2049 break; 2050 case PACKET3_SET_CONTEXT_REG: 2051 start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_OFFSET; 2052 end_reg = 4 * pkt->count + start_reg - 4; 2053 if ((start_reg < PACKET3_SET_CONTEXT_REG_OFFSET) || 2054 (start_reg >= PACKET3_SET_CONTEXT_REG_END) || 2055 (end_reg >= PACKET3_SET_CONTEXT_REG_END)) { 2056 DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n"); 2057 return -EINVAL; 2058 } 2059 for (i = 0; i < pkt->count; i++) { 2060 reg = start_reg + (4 * i); 2061 r = r600_cs_check_reg(p, reg, idx+1+i); 2062 if (r) 2063 return r; 2064 } 2065 break; 2066 case PACKET3_SET_RESOURCE: 2067 if (pkt->count % 7) { 2068 DRM_ERROR("bad SET_RESOURCE\n"); 2069 return -EINVAL; 2070 } 2071 start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_OFFSET; 2072 end_reg = 4 * pkt->count + start_reg - 4; 2073 if ((start_reg < PACKET3_SET_RESOURCE_OFFSET) || 2074 (start_reg >= PACKET3_SET_RESOURCE_END) || 2075 (end_reg >= PACKET3_SET_RESOURCE_END)) { 2076 DRM_ERROR("bad SET_RESOURCE\n"); 2077 return -EINVAL; 2078 } 2079 for (i = 0; i < (pkt->count / 7); i++) { 2080 struct radeon_bo *texture, *mipmap; 2081 u32 size, offset, base_offset, mip_offset; 2082 2083 switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) { 2084 case SQ_TEX_VTX_VALID_TEXTURE: 2085 /* tex base */ 2086 r = r600_cs_packet_next_reloc(p, &reloc); 2087 if (r) { 2088 DRM_ERROR("bad SET_RESOURCE\n"); 2089 return -EINVAL; 2090 } 2091 base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 2092 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { 2093 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) 2094 ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1); 2095 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) 2096 ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1); 2097 } 2098 texture = reloc->robj; 2099 /* tex mip base */ 2100 r = r600_cs_packet_next_reloc(p, &reloc); 2101 if (r) { 2102 DRM_ERROR("bad SET_RESOURCE\n"); 2103 return -EINVAL; 2104 } 2105 mip_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 2106 mipmap = reloc->robj; 2107 r = r600_check_texture_resource(p, idx+(i*7)+1, 2108 texture, mipmap, 2109 base_offset + radeon_get_ib_value(p, idx+1+(i*7)+2), 2110 mip_offset + radeon_get_ib_value(p, idx+1+(i*7)+3), 2111 reloc->lobj.tiling_flags); 2112 if (r) 2113 return r; 2114 ib[idx+1+(i*7)+2] += base_offset; 2115 ib[idx+1+(i*7)+3] += mip_offset; 2116 break; 
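		/*
		 * Vertex-buffer resources only need their base address patched
		 * and their size checked against the backing bo; an oversized
		 * size field is clamped to the bo size (with a warning) rather
		 * than rejected.
		 */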
2117 case SQ_TEX_VTX_VALID_BUFFER: 2118 { 2119 uint64_t offset64; 2120 /* vtx base */ 2121 r = r600_cs_packet_next_reloc(p, &reloc); 2122 if (r) { 2123 DRM_ERROR("bad SET_RESOURCE\n"); 2124 return -EINVAL; 2125 } 2126 offset = radeon_get_ib_value(p, idx+1+(i*7)+0); 2127 size = radeon_get_ib_value(p, idx+1+(i*7)+1) + 1; 2128 if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) { 2129 /* force size to size of the buffer */ 2130 dev_warn(p->dev, "vbo resource seems too big (%d) for the bo (%ld)\n", 2131 size + offset, radeon_bo_size(reloc->robj)); 2132 ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj) - offset; 2133 } 2134 2135 offset64 = reloc->lobj.gpu_offset + offset; 2136 ib[idx+1+(i*8)+0] = offset64; 2137 ib[idx+1+(i*8)+2] = (ib[idx+1+(i*8)+2] & 0xffffff00) | 2138 (upper_32_bits(offset64) & 0xff); 2139 break; 2140 } 2141 case SQ_TEX_VTX_INVALID_TEXTURE: 2142 case SQ_TEX_VTX_INVALID_BUFFER: 2143 default: 2144 DRM_ERROR("bad SET_RESOURCE\n"); 2145 return -EINVAL; 2146 } 2147 } 2148 break; 2149 case PACKET3_SET_ALU_CONST: 2150 if (track->sq_config & DX9_CONSTS) { 2151 start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET; 2152 end_reg = 4 * pkt->count + start_reg - 4; 2153 if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) || 2154 (start_reg >= PACKET3_SET_ALU_CONST_END) || 2155 (end_reg >= PACKET3_SET_ALU_CONST_END)) { 2156 DRM_ERROR("bad SET_ALU_CONST\n"); 2157 return -EINVAL; 2158 } 2159 } 2160 break; 2161 case PACKET3_SET_BOOL_CONST: 2162 start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_OFFSET; 2163 end_reg = 4 * pkt->count + start_reg - 4; 2164 if ((start_reg < PACKET3_SET_BOOL_CONST_OFFSET) || 2165 (start_reg >= PACKET3_SET_BOOL_CONST_END) || 2166 (end_reg >= PACKET3_SET_BOOL_CONST_END)) { 2167 DRM_ERROR("bad SET_BOOL_CONST\n"); 2168 return -EINVAL; 2169 } 2170 break; 2171 case PACKET3_SET_LOOP_CONST: 2172 start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_OFFSET; 2173 end_reg = 4 * pkt->count + start_reg - 4; 2174 if ((start_reg < PACKET3_SET_LOOP_CONST_OFFSET) || 2175 (start_reg >= PACKET3_SET_LOOP_CONST_END) || 2176 (end_reg >= PACKET3_SET_LOOP_CONST_END)) { 2177 DRM_ERROR("bad SET_LOOP_CONST\n"); 2178 return -EINVAL; 2179 } 2180 break; 2181 case PACKET3_SET_CTL_CONST: 2182 start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_OFFSET; 2183 end_reg = 4 * pkt->count + start_reg - 4; 2184 if ((start_reg < PACKET3_SET_CTL_CONST_OFFSET) || 2185 (start_reg >= PACKET3_SET_CTL_CONST_END) || 2186 (end_reg >= PACKET3_SET_CTL_CONST_END)) { 2187 DRM_ERROR("bad SET_CTL_CONST\n"); 2188 return -EINVAL; 2189 } 2190 break; 2191 case PACKET3_SET_SAMPLER: 2192 if (pkt->count % 3) { 2193 DRM_ERROR("bad SET_SAMPLER\n"); 2194 return -EINVAL; 2195 } 2196 start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_OFFSET; 2197 end_reg = 4 * pkt->count + start_reg - 4; 2198 if ((start_reg < PACKET3_SET_SAMPLER_OFFSET) || 2199 (start_reg >= PACKET3_SET_SAMPLER_END) || 2200 (end_reg >= PACKET3_SET_SAMPLER_END)) { 2201 DRM_ERROR("bad SET_SAMPLER\n"); 2202 return -EINVAL; 2203 } 2204 break; 2205 case PACKET3_STRMOUT_BASE_UPDATE: 2206 /* RS780 and RS880 also need this */ 2207 if (p->family < CHIP_RS780) { 2208 DRM_ERROR("STRMOUT_BASE_UPDATE only supported on 7xx\n"); 2209 return -EINVAL; 2210 } 2211 if (pkt->count != 1) { 2212 DRM_ERROR("bad STRMOUT_BASE_UPDATE packet count\n"); 2213 return -EINVAL; 2214 } 2215 if (idx_value > 3) { 2216 DRM_ERROR("bad STRMOUT_BASE_UPDATE index\n"); 2217 return -EINVAL; 2218 } 2219 { 2220 u64 offset; 2221 2222 r = r600_cs_packet_next_reloc(p, &reloc); 2223 if (r) { 2224 
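			/*
			 * Without a reloc, the streamout buffer named by this
			 * packet cannot be compared against the one recorded in
			 * the tracker (vgt_strmout_bo / vgt_strmout_bo_offset)
			 * below, so the packet is rejected.
			 */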
DRM_ERROR("bad STRMOUT_BASE_UPDATE reloc\n"); 2225 return -EINVAL; 2226 } 2227 2228 if (reloc->robj != track->vgt_strmout_bo[idx_value]) { 2229 DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo does not match\n"); 2230 return -EINVAL; 2231 } 2232 2233 offset = radeon_get_ib_value(p, idx+1) << 8; 2234 if (offset != track->vgt_strmout_bo_offset[idx_value]) { 2235 DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo offset does not match: 0x%jx, 0x%x\n", 2236 (uintmax_t)offset, track->vgt_strmout_bo_offset[idx_value]); 2237 return -EINVAL; 2238 } 2239 2240 if ((offset + 4) > radeon_bo_size(reloc->robj)) { 2241 DRM_ERROR("bad STRMOUT_BASE_UPDATE bo too small: 0x%jx, 0x%lx\n", 2242 (uintmax_t)offset + 4, radeon_bo_size(reloc->robj)); 2243 return -EINVAL; 2244 } 2245 ib[idx+1] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 2246 } 2247 break; 2248 case PACKET3_SURFACE_BASE_UPDATE: 2249 if (p->family >= CHIP_RV770 || p->family == CHIP_R600) { 2250 DRM_ERROR("bad SURFACE_BASE_UPDATE\n"); 2251 return -EINVAL; 2252 } 2253 if (pkt->count) { 2254 DRM_ERROR("bad SURFACE_BASE_UPDATE\n"); 2255 return -EINVAL; 2256 } 2257 break; 2258 case PACKET3_STRMOUT_BUFFER_UPDATE: 2259 if (pkt->count != 4) { 2260 DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (invalid count)\n"); 2261 return -EINVAL; 2262 } 2263 /* Updating memory at DST_ADDRESS. */ 2264 if (idx_value & 0x1) { 2265 u64 offset; 2266 r = r600_cs_packet_next_reloc(p, &reloc); 2267 if (r) { 2268 DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n"); 2269 return -EINVAL; 2270 } 2271 offset = radeon_get_ib_value(p, idx+1); 2272 offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32; 2273 if ((offset + 4) > radeon_bo_size(reloc->robj)) { 2274 DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%jx, 0x%lx\n", 2275 (uintmax_t)offset + 4, radeon_bo_size(reloc->robj)); 2276 return -EINVAL; 2277 } 2278 offset += reloc->lobj.gpu_offset; 2279 ib[idx+1] = offset; 2280 ib[idx+2] = upper_32_bits(offset) & 0xff; 2281 } 2282 /* Reading data from SRC_ADDRESS. 
*/ 2283 if (((idx_value >> 1) & 0x3) == 2) { 2284 u64 offset; 2285 r = r600_cs_packet_next_reloc(p, &reloc); 2286 if (r) { 2287 DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n"); 2288 return -EINVAL; 2289 } 2290 offset = radeon_get_ib_value(p, idx+3); 2291 offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32; 2292 if ((offset + 4) > radeon_bo_size(reloc->robj)) { 2293 DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%jx, 0x%lx\n", 2294 (uintmax_t)offset + 4, radeon_bo_size(reloc->robj)); 2295 return -EINVAL; 2296 } 2297 offset += reloc->lobj.gpu_offset; 2298 ib[idx+3] = offset; 2299 ib[idx+4] = upper_32_bits(offset) & 0xff; 2300 } 2301 break; 2302 case PACKET3_MEM_WRITE: 2303 { 2304 u64 offset; 2305 2306 if (pkt->count != 3) { 2307 DRM_ERROR("bad MEM_WRITE (invalid count)\n"); 2308 return -EINVAL; 2309 } 2310 r = r600_cs_packet_next_reloc(p, &reloc); 2311 if (r) { 2312 DRM_ERROR("bad MEM_WRITE (missing reloc)\n"); 2313 return -EINVAL; 2314 } 2315 offset = radeon_get_ib_value(p, idx+0); 2316 offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL; 2317 if (offset & 0x7) { 2318 DRM_ERROR("bad MEM_WRITE (address not qwords aligned)\n"); 2319 return -EINVAL; 2320 } 2321 if ((offset + 8) > radeon_bo_size(reloc->robj)) { 2322 DRM_ERROR("bad MEM_WRITE bo too small: 0x%jx, 0x%lx\n", 2323 (uintmax_t)offset + 8, radeon_bo_size(reloc->robj)); 2324 return -EINVAL; 2325 } 2326 offset += reloc->lobj.gpu_offset; 2327 ib[idx+0] = offset; 2328 ib[idx+1] = upper_32_bits(offset) & 0xff; 2329 break; 2330 } 2331 case PACKET3_COPY_DW: 2332 if (pkt->count != 4) { 2333 DRM_ERROR("bad COPY_DW (invalid count)\n"); 2334 return -EINVAL; 2335 } 2336 if (idx_value & 0x1) { 2337 u64 offset; 2338 /* SRC is memory. */ 2339 r = r600_cs_packet_next_reloc(p, &reloc); 2340 if (r) { 2341 DRM_ERROR("bad COPY_DW (missing src reloc)\n"); 2342 return -EINVAL; 2343 } 2344 offset = radeon_get_ib_value(p, idx+1); 2345 offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32; 2346 if ((offset + 4) > radeon_bo_size(reloc->robj)) { 2347 DRM_ERROR("bad COPY_DW src bo too small: 0x%jx, 0x%lx\n", 2348 (uintmax_t)offset + 4, radeon_bo_size(reloc->robj)); 2349 return -EINVAL; 2350 } 2351 offset += reloc->lobj.gpu_offset; 2352 ib[idx+1] = offset; 2353 ib[idx+2] = upper_32_bits(offset) & 0xff; 2354 } else { 2355 /* SRC is a reg. */ 2356 reg = radeon_get_ib_value(p, idx+1) << 2; 2357 if (!r600_is_safe_reg(p, reg, idx+1)) 2358 return -EINVAL; 2359 } 2360 if (idx_value & 0x2) { 2361 u64 offset; 2362 /* DST is memory. */ 2363 r = r600_cs_packet_next_reloc(p, &reloc); 2364 if (r) { 2365 DRM_ERROR("bad COPY_DW (missing dst reloc)\n"); 2366 return -EINVAL; 2367 } 2368 offset = radeon_get_ib_value(p, idx+3); 2369 offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32; 2370 if ((offset + 4) > radeon_bo_size(reloc->robj)) { 2371 DRM_ERROR("bad COPY_DW dst bo too small: 0x%jx, 0x%lx\n", 2372 (uintmax_t)offset + 4, radeon_bo_size(reloc->robj)); 2373 return -EINVAL; 2374 } 2375 offset += reloc->lobj.gpu_offset; 2376 ib[idx+3] = offset; 2377 ib[idx+4] = upper_32_bits(offset) & 0xff; 2378 } else { 2379 /* DST is a reg. 
*/ 2380 reg = radeon_get_ib_value(p, idx+3) << 2; 2381 if (!r600_is_safe_reg(p, reg, idx+3)) 2382 return -EINVAL; 2383 } 2384 break; 2385 case PACKET3_NOP: 2386 break; 2387 default: 2388 DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode); 2389 return -EINVAL; 2390 } 2391 return 0; 2392} 2393 2394int r600_cs_parse(struct radeon_cs_parser *p) 2395{ 2396 struct radeon_cs_packet pkt; 2397 struct r600_cs_track *track; 2398 int r; 2399 2400 if (p->track == NULL) { 2401 /* initialize tracker, we are in kms */ 2402 track = malloc(sizeof(*track), 2403 DRM_MEM_DRIVER, M_ZERO | M_WAITOK); 2404 if (track == NULL) 2405 return -ENOMEM; 2406 r600_cs_track_init(track); 2407 if (p->rdev->family < CHIP_RV770) { 2408 track->npipes = p->rdev->config.r600.tiling_npipes; 2409 track->nbanks = p->rdev->config.r600.tiling_nbanks; 2410 track->group_size = p->rdev->config.r600.tiling_group_size; 2411 } else if (p->rdev->family <= CHIP_RV740) { 2412 track->npipes = p->rdev->config.rv770.tiling_npipes; 2413 track->nbanks = p->rdev->config.rv770.tiling_nbanks; 2414 track->group_size = p->rdev->config.rv770.tiling_group_size; 2415 } 2416 p->track = track; 2417 } 2418 do { 2419 r = r600_cs_packet_parse(p, &pkt, p->idx); 2420 if (r) { 2421 free(p->track, DRM_MEM_DRIVER); 2422 p->track = NULL; 2423 return r; 2424 } 2425 p->idx += pkt.count + 2; 2426 switch (pkt.type) { 2427 case PACKET_TYPE0: 2428 r = r600_cs_parse_packet0(p, &pkt); 2429 break; 2430 case PACKET_TYPE2: 2431 break; 2432 case PACKET_TYPE3: 2433 r = r600_packet3_check(p, &pkt); 2434 break; 2435 default: 2436 DRM_ERROR("Unknown packet type %d !\n", pkt.type); 2437 free(p->track, DRM_MEM_DRIVER); 2438 p->track = NULL; 2439 return -EINVAL; 2440 } 2441 if (r) { 2442 free(p->track, DRM_MEM_DRIVER); 2443 p->track = NULL; 2444 return r; 2445 } 2446 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); 2447#if 0 2448 for (r = 0; r < p->ib.length_dw; r++) { 2449 DRM_INFO("%05d 0x%08X\n", r, p->ib.ptr[r]); 2450 DRM_MDELAY(1); 2451 } 2452#endif 2453 free(p->track, DRM_MEM_DRIVER); 2454 p->track = NULL; 2455 return 0; 2456} 2457 2458static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p) 2459{ 2460 if (p->chunk_relocs_idx == -1) { 2461 return 0; 2462 } 2463 p->relocs = malloc(sizeof(struct radeon_cs_reloc), 2464 DRM_MEM_DRIVER, M_ZERO | M_WAITOK); 2465 if (p->relocs == NULL) { 2466 return -ENOMEM; 2467 } 2468 return 0; 2469} 2470 2471/** 2472 * cs_parser_fini() - clean parser states 2473 * @parser: parser structure holding parsing context. 2474 * @error: error number 2475 * 2476 * If error is set than unvalidate buffer, otherwise just free memory 2477 * used by parsing context. 
2478 **/ 2479static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error) 2480{ 2481 unsigned i; 2482 2483 free(parser->relocs, DRM_MEM_DRIVER); 2484 for (i = 0; i < parser->nchunks; i++) { 2485 free(parser->chunks[i].kdata, DRM_MEM_DRIVER); 2486 if (parser->rdev && (parser->rdev->flags & RADEON_IS_AGP)) { 2487 free(parser->chunks[i].kpage[0], DRM_MEM_DRIVER); 2488 free(parser->chunks[i].kpage[1], DRM_MEM_DRIVER); 2489 } 2490 } 2491 free(parser->chunks, DRM_MEM_DRIVER); 2492 free(parser->chunks_array, DRM_MEM_DRIVER); 2493 free(parser->track, DRM_MEM_DRIVER); 2494} 2495 2496int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp, 2497 unsigned family, u32 *ib, int *l) 2498{ 2499 struct radeon_cs_parser parser; 2500 struct radeon_cs_chunk *ib_chunk; 2501 struct r600_cs_track *track; 2502 int r; 2503 2504 /* initialize tracker */ 2505 track = malloc(sizeof(*track), DRM_MEM_DRIVER, M_ZERO | M_WAITOK); 2506 if (track == NULL) 2507 return -ENOMEM; 2508 r600_cs_track_init(track); 2509 r600_cs_legacy_get_tiling_conf(dev, &track->npipes, &track->nbanks, &track->group_size); 2510 /* initialize parser */ 2511 memset(&parser, 0, sizeof(struct radeon_cs_parser)); 2512 parser.filp = filp; 2513 parser.dev = dev->device; 2514 parser.rdev = NULL; 2515 parser.family = family; 2516 parser.track = track; 2517 parser.ib.ptr = ib; 2518 r = radeon_cs_parser_init(&parser, data); 2519 if (r) { 2520 DRM_ERROR("Failed to initialize parser !\n"); 2521 r600_cs_parser_fini(&parser, r); 2522 return r; 2523 } 2524 r = r600_cs_parser_relocs_legacy(&parser); 2525 if (r) { 2526 DRM_ERROR("Failed to parse relocation !\n"); 2527 r600_cs_parser_fini(&parser, r); 2528 return r; 2529 } 2530 /* Copy the packet into the IB, the parser will read from the 2531 * input memory (cached) and write to the IB (which can be 2532 * uncached). */ 2533 ib_chunk = &parser.chunks[parser.chunk_ib_idx]; 2534 parser.ib.length_dw = ib_chunk->length_dw; 2535 *l = parser.ib.length_dw; 2536 r = r600_cs_parse(&parser); 2537 if (r) { 2538 DRM_ERROR("Invalid command stream !\n"); 2539 r600_cs_parser_fini(&parser, r); 2540 return r; 2541 } 2542 r = radeon_cs_finish_pages(&parser); 2543 if (r) { 2544 DRM_ERROR("Invalid command stream !\n"); 2545 r600_cs_parser_fini(&parser, r); 2546 return r; 2547 } 2548 r600_cs_parser_fini(&parser, r); 2549 return r; 2550} 2551 2552void r600_cs_legacy_init(void) 2553{ 2554 r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_nomm; 2555} 2556 2557/* 2558 * DMA 2559 */ 2560/** 2561 * r600_dma_cs_next_reloc() - parse next reloc 2562 * @p: parser structure holding parsing context. 2563 * @cs_reloc: reloc informations 2564 * 2565 * Return the next reloc, do bo validation and compute 2566 * GPU offset using the provided start. 
2567 **/ 2568int r600_dma_cs_next_reloc(struct radeon_cs_parser *p, 2569 struct radeon_cs_reloc **cs_reloc) 2570{ 2571 struct radeon_cs_chunk *relocs_chunk; 2572 unsigned idx; 2573 2574 *cs_reloc = NULL; 2575 if (p->chunk_relocs_idx == -1) { 2576 DRM_ERROR("No relocation chunk !\n"); 2577 return -EINVAL; 2578 } 2579 relocs_chunk = &p->chunks[p->chunk_relocs_idx]; 2580 idx = p->dma_reloc_idx; 2581 if (idx >= p->nrelocs) { 2582 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", 2583 idx, p->nrelocs); 2584 return -EINVAL; 2585 } 2586 *cs_reloc = p->relocs_ptr[idx]; 2587 p->dma_reloc_idx++; 2588 return 0; 2589} 2590 2591#define GET_DMA_CMD(h) (((h) & 0xf0000000) >> 28) 2592#define GET_DMA_COUNT(h) ((h) & 0x0000ffff) 2593#define GET_DMA_T(h) (((h) & 0x00800000) >> 23) 2594 2595/** 2596 * r600_dma_cs_parse() - parse the DMA IB 2597 * @p: parser structure holding parsing context. 2598 * 2599 * Parses the DMA IB from the CS ioctl and updates 2600 * the GPU addresses based on the reloc information and 2601 * checks for errors. (R6xx-R7xx) 2602 * Returns 0 for success and an error on failure. 2603 **/ 2604int r600_dma_cs_parse(struct radeon_cs_parser *p) 2605{ 2606 struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx]; 2607 struct radeon_cs_reloc *src_reloc, *dst_reloc; 2608 u32 header, cmd, count, tiled; 2609 volatile u32 *ib = p->ib.ptr; 2610 u32 idx, idx_value; 2611 u64 src_offset, dst_offset; 2612 int r; 2613 2614 do { 2615 if (p->idx >= ib_chunk->length_dw) { 2616 DRM_ERROR("Can not parse packet at %d after CS end %d !\n", 2617 p->idx, ib_chunk->length_dw); 2618 return -EINVAL; 2619 } 2620 idx = p->idx; 2621 header = radeon_get_ib_value(p, idx); 2622 cmd = GET_DMA_CMD(header); 2623 count = GET_DMA_COUNT(header); 2624 tiled = GET_DMA_T(header); 2625 2626 switch (cmd) { 2627 case DMA_PACKET_WRITE: 2628 r = r600_dma_cs_next_reloc(p, &dst_reloc); 2629 if (r) { 2630 DRM_ERROR("bad DMA_PACKET_WRITE\n"); 2631 return -EINVAL; 2632 } 2633 if (tiled) { 2634 dst_offset = radeon_get_ib_value(p, idx+1); 2635 dst_offset <<= 8; 2636 2637 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); 2638 p->idx += count + 5; 2639 } else { 2640 dst_offset = radeon_get_ib_value(p, idx+1); 2641 dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32; 2642 2643 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); 2644 ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; 2645 p->idx += count + 3; 2646 } 2647 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { 2648 dev_warn(p->dev, "DMA write buffer too small (%ju %lu)\n", 2649 (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); 2650 return -EINVAL; 2651 } 2652 break; 2653 case DMA_PACKET_COPY: 2654 r = r600_dma_cs_next_reloc(p, &src_reloc); 2655 if (r) { 2656 DRM_ERROR("bad DMA_PACKET_COPY\n"); 2657 return -EINVAL; 2658 } 2659 r = r600_dma_cs_next_reloc(p, &dst_reloc); 2660 if (r) { 2661 DRM_ERROR("bad DMA_PACKET_COPY\n"); 2662 return -EINVAL; 2663 } 2664 if (tiled) { 2665 idx_value = radeon_get_ib_value(p, idx + 2); 2666 /* detile bit */ 2667 if (idx_value & (1 << 31)) { 2668 /* tiled src, linear dst */ 2669 src_offset = radeon_get_ib_value(p, idx+1); 2670 src_offset <<= 8; 2671 ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8); 2672 2673 dst_offset = radeon_get_ib_value(p, idx+5); 2674 dst_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32; 2675 ib[idx+5] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); 2676 ib[idx+6] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; 2677 } 
else { 2678 /* linear src, tiled dst */ 2679 src_offset = radeon_get_ib_value(p, idx+5); 2680 src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32; 2681 ib[idx+5] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); 2682 ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; 2683 2684 dst_offset = radeon_get_ib_value(p, idx+1); 2685 dst_offset <<= 8; 2686 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); 2687 } 2688 p->idx += 7; 2689 } else { 2690 if (p->family >= CHIP_RV770) { 2691 src_offset = radeon_get_ib_value(p, idx+2); 2692 src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32; 2693 dst_offset = radeon_get_ib_value(p, idx+1); 2694 dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32; 2695 2696 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); 2697 ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); 2698 ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; 2699 ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; 2700 p->idx += 5; 2701 } else { 2702 src_offset = radeon_get_ib_value(p, idx+2); 2703 src_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32; 2704 dst_offset = radeon_get_ib_value(p, idx+1); 2705 dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff0000)) << 16; 2706 2707 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); 2708 ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); 2709 ib[idx+3] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; 2710 ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff) << 16; 2711 p->idx += 4; 2712 } 2713 } 2714 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { 2715 dev_warn(p->dev, "DMA copy src buffer too small (%ju %lu)\n", 2716 (uintmax_t)src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); 2717 return -EINVAL; 2718 } 2719 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { 2720 dev_warn(p->dev, "DMA write dst buffer too small (%ju %lu)\n", 2721 (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); 2722 return -EINVAL; 2723 } 2724 break; 2725 case DMA_PACKET_CONSTANT_FILL: 2726 if (p->family < CHIP_RV770) { 2727 DRM_ERROR("Constant Fill is 7xx only !\n"); 2728 return -EINVAL; 2729 } 2730 r = r600_dma_cs_next_reloc(p, &dst_reloc); 2731 if (r) { 2732 DRM_ERROR("bad DMA_PACKET_CONSTANT_FILL\n"); 2733 return -EINVAL; 2734 } 2735 dst_offset = radeon_get_ib_value(p, idx+1); 2736 dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16; 2737 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { 2738 dev_warn(p->dev, "DMA constant fill buffer too small (%ju %lu)\n", 2739 (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); 2740 return -EINVAL; 2741 } 2742 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); 2743 ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) << 16) & 0x00ff0000; 2744 p->idx += 4; 2745 break; 2746 case DMA_PACKET_NOP: 2747 p->idx += 1; 2748 break; 2749 default: 2750 DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx); 2751 return -EINVAL; 2752 } 2753 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); 2754#if 0 2755 for (r = 0; r < p->ib.length_dw; r++) { 2756 DRM_INFO("%05d 0x%08X\n", r, p->ib.ptr[r]); 2757 DRM_MDELAY(1); 2758 } 2759#endif 2760 return 0; 2761} 2762
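/*
 * Illustrative note (example values only, not taken from a real command
 * stream): with the GET_DMA_* macros above, a DMA header word of 0x30800040
 * decodes as cmd == 0x3, tiled == 1 and count == 0x40 dwords.
 */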