/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/drm2/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "r600d.h"
#include "r600_reg_safe.h"
#include "r600_cp.h"
#include "r600_cs.h"

static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
					struct radeon_cs_reloc **cs_reloc);
typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**);
static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm;
#ifdef FREEBSD_WIP /* FreeBSD: to please GCC 4.2. */
extern void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size);
#endif

struct r600_cs_track {
	/* configuration we mirror so that we use same code btw kms/ums */
	u32			group_size;
	u32			nbanks;
	u32			npipes;
	/* value we track */
	u32			sq_config;
	u32			log_nsamples;
	u32			nsamples;
	u32			cb_color_base_last[8];
	struct radeon_bo	*cb_color_bo[8];
	u64			cb_color_bo_mc[8];
	u64			cb_color_bo_offset[8];
	struct radeon_bo	*cb_color_frag_bo[8];
	u64			cb_color_frag_offset[8];
	struct radeon_bo	*cb_color_tile_bo[8];
	u64			cb_color_tile_offset[8];
	u32			cb_color_mask[8];
	u32			cb_color_info[8];
	u32			cb_color_view[8];
	u32			cb_color_size_idx[8]; /* unused */
	u32			cb_target_mask;
	u32			cb_shader_mask;  /* unused */
	bool			is_resolve;
	u32			cb_color_size[8];
	u32			vgt_strmout_en;
	u32			vgt_strmout_buffer_en;
	struct radeon_bo	*vgt_strmout_bo[4];
	u64			vgt_strmout_bo_mc[4]; /* unused */
	u32			vgt_strmout_bo_offset[4];
	u32			vgt_strmout_size[4];
	u32			db_depth_control;
	u32			db_depth_info;
	u32			db_depth_size_idx;
	u32			db_depth_view;
	u32			db_depth_size;
	u32			db_offset;
	struct radeon_bo	*db_bo;
	u64			db_bo_mc;
	bool			sx_misc_kill_all_prims;
	bool			cb_dirty;
	bool			db_dirty;
	bool			streamout_dirty;
	struct radeon_bo	*htile_bo;
	u64			htile_offset;
	u32			htile_surface;
};

#define FMT_8_BIT(fmt, vc)   [fmt] = { 1, 1, 1, vc, CHIP_R600 }
#define FMT_16_BIT(fmt, vc)  [fmt] = { 1, 1, 2, vc, CHIP_R600 }
#define FMT_24_BIT(fmt)      [fmt] = { 1, 1, 4, 0, CHIP_R600 }
#define FMT_32_BIT(fmt, vc)  [fmt] = { 1, 1, 4, vc, CHIP_R600 }
#define FMT_48_BIT(fmt)      [fmt] = { 1, 1, 8, 0, CHIP_R600 }
#define FMT_64_BIT(fmt, vc)  [fmt] = { 1, 1, 8, vc, CHIP_R600 }
#define FMT_96_BIT(fmt)      [fmt] = { 1, 1, 12, 0, CHIP_R600 }
#define FMT_128_BIT(fmt, vc) [fmt] = { 1, 1, 16, vc, CHIP_R600 }

struct gpu_formats {
	unsigned blockwidth;
	unsigned blockheight;
	unsigned blocksize;
	unsigned valid_color;
	enum radeon_family min_family;
};
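/*
 * For illustration (added note, not exhaustive): each entry in the table
 * below describes a block of blockwidth x blockheight texels occupying
 * blocksize bytes, so a mip level is sized roughly as
 *
 *	r600_fmt_get_nblocksx(fmt, w) * r600_fmt_get_nblocksy(fmt, h) *
 *		r600_fmt_get_blocksize(fmt)
 *
 * e.g. BC1 is listed as { 4, 4, 8, 0 }, so a 256x256 BC1 level takes
 * (256/4) * (256/4) * 8 = 32768 bytes before alignment.
 */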
static const struct gpu_formats color_formats_table[] = {
	/* 8 bit */
	FMT_8_BIT(V_038004_COLOR_8, 1),
	FMT_8_BIT(V_038004_COLOR_4_4, 1),
	FMT_8_BIT(V_038004_COLOR_3_3_2, 1),
	FMT_8_BIT(V_038004_FMT_1, 0),

	/* 16-bit */
	FMT_16_BIT(V_038004_COLOR_16, 1),
	FMT_16_BIT(V_038004_COLOR_16_FLOAT, 1),
	FMT_16_BIT(V_038004_COLOR_8_8, 1),
	FMT_16_BIT(V_038004_COLOR_5_6_5, 1),
	FMT_16_BIT(V_038004_COLOR_6_5_5, 1),
	FMT_16_BIT(V_038004_COLOR_1_5_5_5, 1),
	FMT_16_BIT(V_038004_COLOR_4_4_4_4, 1),
	FMT_16_BIT(V_038004_COLOR_5_5_5_1, 1),

	/* 24-bit */
	FMT_24_BIT(V_038004_FMT_8_8_8),

	/* 32-bit */
	FMT_32_BIT(V_038004_COLOR_32, 1),
	FMT_32_BIT(V_038004_COLOR_32_FLOAT, 1),
	FMT_32_BIT(V_038004_COLOR_16_16, 1),
	FMT_32_BIT(V_038004_COLOR_16_16_FLOAT, 1),
	FMT_32_BIT(V_038004_COLOR_8_24, 1),
	FMT_32_BIT(V_038004_COLOR_8_24_FLOAT, 1),
	FMT_32_BIT(V_038004_COLOR_24_8, 1),
	FMT_32_BIT(V_038004_COLOR_24_8_FLOAT, 1),
	FMT_32_BIT(V_038004_COLOR_10_11_11, 1),
	FMT_32_BIT(V_038004_COLOR_10_11_11_FLOAT, 1),
	FMT_32_BIT(V_038004_COLOR_11_11_10, 1),
	FMT_32_BIT(V_038004_COLOR_11_11_10_FLOAT, 1),
	FMT_32_BIT(V_038004_COLOR_2_10_10_10, 1),
	FMT_32_BIT(V_038004_COLOR_8_8_8_8, 1),
	FMT_32_BIT(V_038004_COLOR_10_10_10_2, 1),
	FMT_32_BIT(V_038004_FMT_5_9_9_9_SHAREDEXP, 0),
	FMT_32_BIT(V_038004_FMT_32_AS_8, 0),
	FMT_32_BIT(V_038004_FMT_32_AS_8_8, 0),

	/* 48-bit */
	FMT_48_BIT(V_038004_FMT_16_16_16),
	FMT_48_BIT(V_038004_FMT_16_16_16_FLOAT),

	/* 64-bit */
	FMT_64_BIT(V_038004_COLOR_X24_8_32_FLOAT, 1),
	FMT_64_BIT(V_038004_COLOR_32_32, 1),
	FMT_64_BIT(V_038004_COLOR_32_32_FLOAT, 1),
	FMT_64_BIT(V_038004_COLOR_16_16_16_16, 1),
	FMT_64_BIT(V_038004_COLOR_16_16_16_16_FLOAT, 1),

	FMT_96_BIT(V_038004_FMT_32_32_32),
	FMT_96_BIT(V_038004_FMT_32_32_32_FLOAT),

	/* 128-bit */
	FMT_128_BIT(V_038004_COLOR_32_32_32_32, 1),
	FMT_128_BIT(V_038004_COLOR_32_32_32_32_FLOAT, 1),

	[V_038004_FMT_GB_GR] = { 2, 1, 4, 0 },
	[V_038004_FMT_BG_RG] = { 2, 1, 4, 0 },

	/* block compressed formats */
	[V_038004_FMT_BC1] = { 4, 4, 8, 0 },
	[V_038004_FMT_BC2] = { 4, 4, 16, 0 },
	[V_038004_FMT_BC3] = { 4, 4, 16, 0 },
	[V_038004_FMT_BC4] = { 4, 4, 8, 0 },
	[V_038004_FMT_BC5] = { 4, 4, 16, 0 },
	[V_038004_FMT_BC6] = { 4, 4, 16, 0, CHIP_CEDAR }, /* Evergreen-only */
	[V_038004_FMT_BC7] = { 4, 4, 16, 0, CHIP_CEDAR }, /* Evergreen-only */

	/* The other Evergreen formats */
	[V_038004_FMT_32_AS_32_32_32_32] = { 1, 1, 4, 0, CHIP_CEDAR },
};

bool r600_fmt_is_valid_color(u32 format)
{
	if (format >= ARRAY_SIZE(color_formats_table))
		return false;

	if (color_formats_table[format].valid_color)
		return true;

	return false;
}

bool r600_fmt_is_valid_texture(u32 format, enum radeon_family family)
{
	if (format >= ARRAY_SIZE(color_formats_table))
		return false;

	if (family < color_formats_table[format].min_family)
		return false;

	if (color_formats_table[format].blockwidth > 0)
		return true;

	return false;
}

int r600_fmt_get_blocksize(u32 format)
{
	if (format >= ARRAY_SIZE(color_formats_table))
		return 0;

	return color_formats_table[format].blocksize;
}

int r600_fmt_get_nblocksx(u32 format, u32 w)
{
	unsigned bw;

	if (format >= ARRAY_SIZE(color_formats_table))
		return 0;

	bw = color_formats_table[format].blockwidth;
	if (bw == 0)
		return 0;

	return (w + bw - 1) / bw;
}

int r600_fmt_get_nblocksy(u32 format, u32 h)
{
	unsigned bh;

	if (format >= ARRAY_SIZE(color_formats_table))
		return 0;

	bh = color_formats_table[format].blockheight;
	if (bh == 0)
		return 0;

	return (h + bh - 1) / bh;
}

struct array_mode_checker {
	int array_mode;
	u32 group_size;
	u32 nbanks;
	u32 npipes;
	u32 nsamples;
	u32 blocksize;
};
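/*
 * Worked example for the helper below (added for illustration; the input
 * numbers are hypothetical, the formulas are the ones the function uses):
 * with group_size = 256, nbanks = 4, npipes = 2, blocksize = 4 and
 * nsamples = 1, one 8x8 micro tile is 8 * 8 * 4 = 256 bytes and a macro
 * tile is 4 * 2 * 256 = 2048 bytes, so ARRAY_2D_TILED_THIN1 needs the base
 * address aligned to at least 2048 bytes while ARRAY_1D_TILED_THIN1 only
 * needs group_size (256 byte) alignment.
 */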
/* returns alignment in pixels for pitch/height/depth and bytes for base */
static int r600_get_array_mode_alignment(struct array_mode_checker *values,
						u32 *pitch_align,
						u32 *height_align,
						u32 *depth_align,
						u64 *base_align)
{
	u32 tile_width = 8;
	u32 tile_height = 8;
	u32 macro_tile_width = values->nbanks;
	u32 macro_tile_height = values->npipes;
	u32 tile_bytes = tile_width * tile_height * values->blocksize * values->nsamples;
	u32 macro_tile_bytes = macro_tile_width * macro_tile_height * tile_bytes;

	switch (values->array_mode) {
	case ARRAY_LINEAR_GENERAL:
		/* technically tile_width/_height for pitch/height */
		*pitch_align = 1; /* tile_width */
		*height_align = 1; /* tile_height */
		*depth_align = 1;
		*base_align = 1;
		break;
	case ARRAY_LINEAR_ALIGNED:
		*pitch_align = max((u32)64, (u32)(values->group_size / values->blocksize));
		*height_align = 1;
		*depth_align = 1;
		*base_align = values->group_size;
		break;
	case ARRAY_1D_TILED_THIN1:
		*pitch_align = max((u32)tile_width,
				   (u32)(values->group_size /
					 (tile_height * values->blocksize * values->nsamples)));
		*height_align = tile_height;
		*depth_align = 1;
		*base_align = values->group_size;
		break;
	case ARRAY_2D_TILED_THIN1:
		*pitch_align = max((u32)macro_tile_width * tile_width,
				(u32)((values->group_size * values->nbanks) /
				(values->blocksize * values->nsamples * tile_width)));
		*height_align = macro_tile_height * tile_height;
		*depth_align = 1;
		*base_align = max(macro_tile_bytes,
				  (*pitch_align) * values->blocksize * (*height_align) * values->nsamples);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void r600_cs_track_init(struct r600_cs_track *track)
{
	int i;

	/* assume DX9 mode */
	track->sq_config = DX9_CONSTS;
	for (i = 0; i < 8; i++) {
		track->cb_color_base_last[i] = 0;
		track->cb_color_size[i] = 0;
		track->cb_color_size_idx[i] = 0;
		track->cb_color_info[i] = 0;
		track->cb_color_view[i] = 0xFFFFFFFF;
		track->cb_color_bo[i] = NULL;
		track->cb_color_bo_offset[i] = 0xFFFFFFFF;
		track->cb_color_bo_mc[i] = 0xFFFFFFFF;
		track->cb_color_frag_bo[i] = NULL;
		track->cb_color_frag_offset[i] = 0xFFFFFFFF;
		track->cb_color_tile_bo[i] = NULL;
		track->cb_color_tile_offset[i] = 0xFFFFFFFF;
		track->cb_color_mask[i] = 0xFFFFFFFF;
	}
	track->is_resolve = false;
	track->nsamples = 16;
	track->log_nsamples = 4;
	track->cb_target_mask = 0xFFFFFFFF;
	track->cb_shader_mask = 0xFFFFFFFF;
	track->cb_dirty = true;
	track->db_bo = NULL;
	track->db_bo_mc = 0xFFFFFFFF;
	/* assume the biggest format and that htile is enabled */
	track->db_depth_info = 7 | (1 << 25);
	track->db_depth_view = 0xFFFFC000;
	track->db_depth_size = 0xFFFFFFFF;
	track->db_depth_size_idx = 0;
	track->db_depth_control = 0xFFFFFFFF;
	track->db_dirty = true;
	track->htile_bo = NULL;
	track->htile_offset = 0xFFFFFFFF;
	track->htile_surface = 0;

	for (i = 0; i < 4; i++) {
		track->vgt_strmout_size[i] = 0;
		track->vgt_strmout_bo[i] = NULL;
		track->vgt_strmout_bo_offset[i] = 0xFFFFFFFF;
		track->vgt_strmout_bo_mc[i] = 0xFFFFFFFF;
	}
	track->streamout_dirty = true;
	track->sx_misc_kill_all_prims = false;
}

static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
{
	struct r600_cs_track *track = p->track;
	u32 slice_tile_max, size, tmp;
	u32 height, height_align, pitch, pitch_align, depth_align;
	u64 base_offset, base_align;
	struct array_mode_checker array_check;
	volatile u32 *ib = p->ib.ptr;
	unsigned array_mode;
	u32 format;
	/* When resolve is used, the second colorbuffer has always 1 sample. */
	unsigned nsamples = track->is_resolve && i == 1 ?
1 : track->nsamples; 372 373 size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i]; 374 format = G_0280A0_FORMAT(track->cb_color_info[i]); 375 if (!r600_fmt_is_valid_color(format)) { 376 dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n", 377 __func__, __LINE__, format, 378 i, track->cb_color_info[i]); 379 return -EINVAL; 380 } 381 /* pitch in pixels */ 382 pitch = (G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1) * 8; 383 slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1; 384 slice_tile_max *= 64; 385 height = slice_tile_max / pitch; 386 if (height > 8192) 387 height = 8192; 388 array_mode = G_0280A0_ARRAY_MODE(track->cb_color_info[i]); 389 390 base_offset = track->cb_color_bo_mc[i] + track->cb_color_bo_offset[i]; 391 array_check.array_mode = array_mode; 392 array_check.group_size = track->group_size; 393 array_check.nbanks = track->nbanks; 394 array_check.npipes = track->npipes; 395 array_check.nsamples = nsamples; 396 array_check.blocksize = r600_fmt_get_blocksize(format); 397 if (r600_get_array_mode_alignment(&array_check, 398 &pitch_align, &height_align, &depth_align, &base_align)) { 399 dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__, 400 G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i, 401 track->cb_color_info[i]); 402 return -EINVAL; 403 } 404 switch (array_mode) { 405 case V_0280A0_ARRAY_LINEAR_GENERAL: 406 break; 407 case V_0280A0_ARRAY_LINEAR_ALIGNED: 408 break; 409 case V_0280A0_ARRAY_1D_TILED_THIN1: 410 /* avoid breaking userspace */ 411 if (height > 7) 412 height &= ~0x7; 413 break; 414 case V_0280A0_ARRAY_2D_TILED_THIN1: 415 break; 416 default: 417 dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__, 418 G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i, 419 track->cb_color_info[i]); 420 return -EINVAL; 421 } 422 423 if (!IS_ALIGNED(pitch, pitch_align)) { 424 dev_warn(p->dev, "%s:%d cb pitch (%d, 0x%x, %d) invalid\n", 425 __func__, __LINE__, pitch, pitch_align, array_mode); 426 return -EINVAL; 427 } 428 if (!IS_ALIGNED(height, height_align)) { 429 dev_warn(p->dev, "%s:%d cb height (%d, 0x%x, %d) invalid\n", 430 __func__, __LINE__, height, height_align, array_mode); 431 return -EINVAL; 432 } 433 if (!IS_ALIGNED(base_offset, base_align)) { 434 dev_warn(p->dev, "%s offset[%d] 0x%jx 0x%jx, %d not aligned\n", __func__, i, 435 (uintmax_t)base_offset, (uintmax_t)base_align, array_mode); 436 return -EINVAL; 437 } 438 439 /* check offset */ 440 tmp = r600_fmt_get_nblocksy(format, height) * r600_fmt_get_nblocksx(format, pitch) * 441 r600_fmt_get_blocksize(format) * nsamples; 442 switch (array_mode) { 443 default: 444 case V_0280A0_ARRAY_LINEAR_GENERAL: 445 case V_0280A0_ARRAY_LINEAR_ALIGNED: 446 tmp += track->cb_color_view[i] & 0xFF; 447 break; 448 case V_0280A0_ARRAY_1D_TILED_THIN1: 449 case V_0280A0_ARRAY_2D_TILED_THIN1: 450 tmp += G_028080_SLICE_MAX(track->cb_color_view[i]) * tmp; 451 break; 452 } 453 if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) { 454 if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) { 455 /* the initial DDX does bad things with the CB size occasionally */ 456 /* it rounds up height too far for slice tile max but the BO is smaller */ 457 /* r600c,g also seem to flush at bad times in some apps resulting in 458 * bogus values here. So for linear just allow anything to avoid breaking 459 * broken userspace. 
460 */ 461 } else { 462 dev_warn(p->dev, "%s offset[%d] %d %ju %d %lu too big (%d %d) (%d %d %d)\n", 463 __func__, i, array_mode, 464 (uintmax_t)track->cb_color_bo_offset[i], tmp, 465 radeon_bo_size(track->cb_color_bo[i]), 466 pitch, height, r600_fmt_get_nblocksx(format, pitch), 467 r600_fmt_get_nblocksy(format, height), 468 r600_fmt_get_blocksize(format)); 469 return -EINVAL; 470 } 471 } 472 /* limit max tile */ 473 tmp = (height * pitch) >> 6; 474 if (tmp < slice_tile_max) 475 slice_tile_max = tmp; 476 tmp = S_028060_PITCH_TILE_MAX((pitch / 8) - 1) | 477 S_028060_SLICE_TILE_MAX(slice_tile_max - 1); 478 ib[track->cb_color_size_idx[i]] = tmp; 479 480 /* FMASK/CMASK */ 481 switch (G_0280A0_TILE_MODE(track->cb_color_info[i])) { 482 case V_0280A0_TILE_DISABLE: 483 break; 484 case V_0280A0_FRAG_ENABLE: 485 if (track->nsamples > 1) { 486 uint32_t tile_max = G_028100_FMASK_TILE_MAX(track->cb_color_mask[i]); 487 /* the tile size is 8x8, but the size is in units of bits. 488 * for bytes, do just * 8. */ 489 uint32_t bytes = track->nsamples * track->log_nsamples * 8 * (tile_max + 1); 490 491 if (bytes + track->cb_color_frag_offset[i] > 492 radeon_bo_size(track->cb_color_frag_bo[i])) { 493 dev_warn(p->dev, "%s FMASK_TILE_MAX too large " 494 "(tile_max=%u, bytes=%u, offset=%ju, bo_size=%lu)\n", 495 __func__, tile_max, bytes, 496 (uintmax_t)track->cb_color_frag_offset[i], 497 radeon_bo_size(track->cb_color_frag_bo[i])); 498 return -EINVAL; 499 } 500 } 501 /* fall through */ 502 case V_0280A0_CLEAR_ENABLE: 503 { 504 uint32_t block_max = G_028100_CMASK_BLOCK_MAX(track->cb_color_mask[i]); 505 /* One block = 128x128 pixels, one 8x8 tile has 4 bits.. 506 * (128*128) / (8*8) / 2 = 128 bytes per block. */ 507 uint32_t bytes = (block_max + 1) * 128; 508 509 if (bytes + track->cb_color_tile_offset[i] > 510 radeon_bo_size(track->cb_color_tile_bo[i])) { 511 dev_warn(p->dev, "%s CMASK_BLOCK_MAX too large " 512 "(block_max=%u, bytes=%u, offset=%ju, bo_size=%lu)\n", 513 __func__, block_max, bytes, 514 (uintmax_t)track->cb_color_tile_offset[i], 515 radeon_bo_size(track->cb_color_tile_bo[i])); 516 return -EINVAL; 517 } 518 break; 519 } 520 default: 521 dev_warn(p->dev, "%s invalid tile mode\n", __func__); 522 return -EINVAL; 523 } 524 return 0; 525} 526 527static int r600_cs_track_validate_db(struct radeon_cs_parser *p) 528{ 529 struct r600_cs_track *track = p->track; 530 u32 nviews, bpe, ntiles, size, slice_tile_max, tmp; 531 u32 height_align, pitch_align, depth_align; 532 u32 pitch = 8192; 533 u32 height = 8192; 534 u64 base_offset, base_align; 535 struct array_mode_checker array_check; 536 int array_mode; 537 volatile u32 *ib = p->ib.ptr; 538 539 540 if (track->db_bo == NULL) { 541 dev_warn(p->dev, "z/stencil with no depth buffer\n"); 542 return -EINVAL; 543 } 544 switch (G_028010_FORMAT(track->db_depth_info)) { 545 case V_028010_DEPTH_16: 546 bpe = 2; 547 break; 548 case V_028010_DEPTH_X8_24: 549 case V_028010_DEPTH_8_24: 550 case V_028010_DEPTH_X8_24_FLOAT: 551 case V_028010_DEPTH_8_24_FLOAT: 552 case V_028010_DEPTH_32_FLOAT: 553 bpe = 4; 554 break; 555 case V_028010_DEPTH_X24_8_32_FLOAT: 556 bpe = 8; 557 break; 558 default: 559 dev_warn(p->dev, "z/stencil with invalid format %d\n", G_028010_FORMAT(track->db_depth_info)); 560 return -EINVAL; 561 } 562 if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) { 563 if (!track->db_depth_size_idx) { 564 dev_warn(p->dev, "z/stencil buffer size not set\n"); 565 return -EINVAL; 566 } 567 tmp = radeon_bo_size(track->db_bo) - track->db_offset; 568 tmp = (tmp / bpe) >> 6; 
569 if (!tmp) { 570 dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n", 571 track->db_depth_size, bpe, track->db_offset, 572 radeon_bo_size(track->db_bo)); 573 return -EINVAL; 574 } 575 ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF); 576 } else { 577 size = radeon_bo_size(track->db_bo); 578 /* pitch in pixels */ 579 pitch = (G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1) * 8; 580 slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1; 581 slice_tile_max *= 64; 582 height = slice_tile_max / pitch; 583 if (height > 8192) 584 height = 8192; 585 base_offset = track->db_bo_mc + track->db_offset; 586 array_mode = G_028010_ARRAY_MODE(track->db_depth_info); 587 array_check.array_mode = array_mode; 588 array_check.group_size = track->group_size; 589 array_check.nbanks = track->nbanks; 590 array_check.npipes = track->npipes; 591 array_check.nsamples = track->nsamples; 592 array_check.blocksize = bpe; 593 if (r600_get_array_mode_alignment(&array_check, 594 &pitch_align, &height_align, &depth_align, &base_align)) { 595 dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__, 596 G_028010_ARRAY_MODE(track->db_depth_info), 597 track->db_depth_info); 598 return -EINVAL; 599 } 600 switch (array_mode) { 601 case V_028010_ARRAY_1D_TILED_THIN1: 602 /* don't break userspace */ 603 height &= ~0x7; 604 break; 605 case V_028010_ARRAY_2D_TILED_THIN1: 606 break; 607 default: 608 dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__, 609 G_028010_ARRAY_MODE(track->db_depth_info), 610 track->db_depth_info); 611 return -EINVAL; 612 } 613 614 if (!IS_ALIGNED(pitch, pitch_align)) { 615 dev_warn(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n", 616 __func__, __LINE__, pitch, pitch_align, array_mode); 617 return -EINVAL; 618 } 619 if (!IS_ALIGNED(height, height_align)) { 620 dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n", 621 __func__, __LINE__, height, height_align, array_mode); 622 return -EINVAL; 623 } 624 if (!IS_ALIGNED(base_offset, base_align)) { 625 dev_warn(p->dev, "%s offset 0x%jx, 0x%jx, %d not aligned\n", __func__, 626 (uintmax_t)base_offset, (uintmax_t)base_align, array_mode); 627 return -EINVAL; 628 } 629 630 ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1; 631 nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1; 632 tmp = ntiles * bpe * 64 * nviews * track->nsamples; 633 if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) { 634 dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n", 635 array_mode, 636 track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset, 637 radeon_bo_size(track->db_bo)); 638 return -EINVAL; 639 } 640 } 641 642 /* hyperz */ 643 if (G_028010_TILE_SURFACE_ENABLE(track->db_depth_info)) { 644 unsigned long size; 645 unsigned nbx, nby; 646 647 if (track->htile_bo == NULL) { 648 dev_warn(p->dev, "%s:%d htile enabled without htile surface 0x%08x\n", 649 __func__, __LINE__, track->db_depth_info); 650 return -EINVAL; 651 } 652 if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) { 653 dev_warn(p->dev, "%s:%d htile can't be enabled with bogus db_depth_size 0x%08x\n", 654 __func__, __LINE__, track->db_depth_size); 655 return -EINVAL; 656 } 657 658 nbx = pitch; 659 nby = height; 660 if (G_028D24_LINEAR(track->htile_surface)) { 661 /* nbx must be 16 htiles aligned == 16 * 8 pixel aligned */ 662 nbx = roundup2(nbx, 16 * 8); 663 /* nby is npipes htiles aligned == npipes * 8 pixel aligned */ 664 nby = roundup(nby, track->npipes * 
8); 665 } else { 666 /* always assume 8x8 htile */ 667 /* align is htile align * 8, htile align vary according to 668 * number of pipe and tile width and nby 669 */ 670 switch (track->npipes) { 671 case 8: 672 /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/ 673 nbx = roundup2(nbx, 64 * 8); 674 nby = roundup2(nby, 64 * 8); 675 break; 676 case 4: 677 /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/ 678 nbx = roundup2(nbx, 64 * 8); 679 nby = roundup2(nby, 32 * 8); 680 break; 681 case 2: 682 /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/ 683 nbx = roundup2(nbx, 32 * 8); 684 nby = roundup2(nby, 32 * 8); 685 break; 686 case 1: 687 /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/ 688 nbx = roundup2(nbx, 32 * 8); 689 nby = roundup2(nby, 16 * 8); 690 break; 691 default: 692 dev_warn(p->dev, "%s:%d invalid num pipes %d\n", 693 __func__, __LINE__, track->npipes); 694 return -EINVAL; 695 } 696 } 697 /* compute number of htile */ 698 nbx = nbx >> 3; 699 nby = nby >> 3; 700 /* size must be aligned on npipes * 2K boundary */ 701 size = roundup(nbx * nby * 4, track->npipes * (2 << 10)); 702 size += track->htile_offset; 703 704 if (size > radeon_bo_size(track->htile_bo)) { 705 dev_warn(p->dev, "%s:%d htile surface too small %ld for %ld (%d %d)\n", 706 __func__, __LINE__, radeon_bo_size(track->htile_bo), 707 size, nbx, nby); 708 return -EINVAL; 709 } 710 } 711 712 track->db_dirty = false; 713 return 0; 714} 715 716static int r600_cs_track_check(struct radeon_cs_parser *p) 717{ 718 struct r600_cs_track *track = p->track; 719 u32 tmp; 720 int r, i; 721 722 /* on legacy kernel we don't perform advanced check */ 723 if (p->rdev == NULL) 724 return 0; 725 726 /* check streamout */ 727 if (track->streamout_dirty && track->vgt_strmout_en) { 728 for (i = 0; i < 4; i++) { 729 if (track->vgt_strmout_buffer_en & (1 << i)) { 730 if (track->vgt_strmout_bo[i]) { 731 u64 offset = (u64)track->vgt_strmout_bo_offset[i] + 732 (u64)track->vgt_strmout_size[i]; 733 if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) { 734 DRM_ERROR("streamout %d bo too small: 0x%jx, 0x%lx\n", 735 i, (uintmax_t)offset, 736 radeon_bo_size(track->vgt_strmout_bo[i])); 737 return -EINVAL; 738 } 739 } else { 740 dev_warn(p->dev, "No buffer for streamout %d\n", i); 741 return -EINVAL; 742 } 743 } 744 } 745 track->streamout_dirty = false; 746 } 747 748 if (track->sx_misc_kill_all_prims) 749 return 0; 750 751 /* check that we have a cb for each enabled target, we don't check 752 * shader_mask because it seems mesa isn't always setting it :( 753 */ 754 if (track->cb_dirty) { 755 tmp = track->cb_target_mask; 756 757 /* We must check both colorbuffers for RESOLVE. 
*/ 758 if (track->is_resolve) { 759 tmp |= 0xff; 760 } 761 762 for (i = 0; i < 8; i++) { 763 if ((tmp >> (i * 4)) & 0xF) { 764 /* at least one component is enabled */ 765 if (track->cb_color_bo[i] == NULL) { 766 dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n", 767 __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i); 768 return -EINVAL; 769 } 770 /* perform rewrite of CB_COLOR[0-7]_SIZE */ 771 r = r600_cs_track_validate_cb(p, i); 772 if (r) 773 return r; 774 } 775 } 776 track->cb_dirty = false; 777 } 778 779 /* Check depth buffer */ 780 if (track->db_dirty && 781 G_028010_FORMAT(track->db_depth_info) != V_028010_DEPTH_INVALID && 782 (G_028800_STENCIL_ENABLE(track->db_depth_control) || 783 G_028800_Z_ENABLE(track->db_depth_control))) { 784 r = r600_cs_track_validate_db(p); 785 if (r) 786 return r; 787 } 788 789 return 0; 790} 791 792/** 793 * r600_cs_packet_parse() - parse cp packet and point ib index to next packet 794 * @parser: parser structure holding parsing context. 795 * @pkt: where to store packet informations 796 * 797 * Assume that chunk_ib_index is properly set. Will return -EINVAL 798 * if packet is bigger than remaining ib size. or if packets is unknown. 799 **/ 800static int r600_cs_packet_parse(struct radeon_cs_parser *p, 801 struct radeon_cs_packet *pkt, 802 unsigned idx) 803{ 804 struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx]; 805 uint32_t header; 806 807 if (idx >= ib_chunk->length_dw) { 808 DRM_ERROR("Can not parse packet at %d after CS end %d !\n", 809 idx, ib_chunk->length_dw); 810 return -EINVAL; 811 } 812 header = radeon_get_ib_value(p, idx); 813 pkt->idx = idx; 814 pkt->type = CP_PACKET_GET_TYPE(header); 815 pkt->count = CP_PACKET_GET_COUNT(header); 816 pkt->one_reg_wr = 0; 817 switch (pkt->type) { 818 case PACKET_TYPE0: 819 pkt->reg = CP_PACKET0_GET_REG(header); 820 break; 821 case PACKET_TYPE3: 822 pkt->opcode = CP_PACKET3_GET_OPCODE(header); 823 break; 824 case PACKET_TYPE2: 825 pkt->count = -1; 826 break; 827 default: 828 DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx); 829 return -EINVAL; 830 } 831 if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) { 832 DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n", 833 pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw); 834 return -EINVAL; 835 } 836 return 0; 837} 838 839/** 840 * r600_cs_packet_next_reloc_mm() - parse next packet which should be reloc packet3 841 * @parser: parser structure holding parsing context. 842 * @data: pointer to relocation data 843 * @offset_start: starting offset 844 * @offset_mask: offset mask (to align start offset on) 845 * @reloc: reloc informations 846 * 847 * Check next packet is relocation packet3, do bo validation and compute 848 * GPU offset using the provided start. 
849 **/ 850static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p, 851 struct radeon_cs_reloc **cs_reloc) 852{ 853 struct radeon_cs_chunk *relocs_chunk; 854 struct radeon_cs_packet p3reloc; 855 unsigned idx; 856 int r; 857 858 if (p->chunk_relocs_idx == -1) { 859 DRM_ERROR("No relocation chunk !\n"); 860 return -EINVAL; 861 } 862 *cs_reloc = NULL; 863 relocs_chunk = &p->chunks[p->chunk_relocs_idx]; 864 r = r600_cs_packet_parse(p, &p3reloc, p->idx); 865 if (r) { 866 return r; 867 } 868 p->idx += p3reloc.count + 2; 869 if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) { 870 DRM_ERROR("No packet3 for relocation for packet at %d.\n", 871 p3reloc.idx); 872 return -EINVAL; 873 } 874 idx = radeon_get_ib_value(p, p3reloc.idx + 1); 875 if (idx >= relocs_chunk->length_dw) { 876 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", 877 idx, relocs_chunk->length_dw); 878 return -EINVAL; 879 } 880 /* FIXME: we assume reloc size is 4 dwords */ 881 *cs_reloc = p->relocs_ptr[(idx / 4)]; 882 return 0; 883} 884 885/** 886 * r600_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc 887 * @parser: parser structure holding parsing context. 888 * 889 * Check next packet is relocation packet3, do bo validation and compute 890 * GPU offset using the provided start. 891 **/ 892static int r600_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p) 893{ 894 struct radeon_cs_packet p3reloc; 895 int r; 896 897 r = r600_cs_packet_parse(p, &p3reloc, p->idx); 898 if (r) { 899 return 0; 900 } 901 if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) { 902 return 0; 903 } 904 return 1; 905} 906 907/** 908 * r600_cs_packet_next_vline() - parse userspace VLINE packet 909 * @parser: parser structure holding parsing context. 910 * 911 * Userspace sends a special sequence for VLINE waits. 912 * PACKET0 - VLINE_START_END + value 913 * PACKET3 - WAIT_REG_MEM poll vline status reg 914 * RELOC (P3) - crtc_id in reloc. 915 * 916 * This function parses this and relocates the VLINE START END 917 * and WAIT_REG_MEM packets to the correct crtc. 918 * It also detects a switched off crtc and nulls out the 919 * wait in that case. 
920 */ 921static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p) 922{ 923 struct drm_mode_object *obj; 924 struct drm_crtc *crtc; 925 struct radeon_crtc *radeon_crtc; 926 struct radeon_cs_packet p3reloc, wait_reg_mem; 927 int crtc_id; 928 int r; 929 uint32_t header, h_idx, reg, wait_reg_mem_info; 930 volatile uint32_t *ib; 931 932 ib = p->ib.ptr; 933 934 /* parse the WAIT_REG_MEM */ 935 r = r600_cs_packet_parse(p, &wait_reg_mem, p->idx); 936 if (r) 937 return r; 938 939 /* check its a WAIT_REG_MEM */ 940 if (wait_reg_mem.type != PACKET_TYPE3 || 941 wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) { 942 DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n"); 943 return -EINVAL; 944 } 945 946 wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1); 947 /* bit 4 is reg (0) or mem (1) */ 948 if (wait_reg_mem_info & 0x10) { 949 DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n"); 950 return -EINVAL; 951 } 952 /* waiting for value to be equal */ 953 if ((wait_reg_mem_info & 0x7) != 0x3) { 954 DRM_ERROR("vline WAIT_REG_MEM function not equal\n"); 955 return -EINVAL; 956 } 957 if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != AVIVO_D1MODE_VLINE_STATUS) { 958 DRM_ERROR("vline WAIT_REG_MEM bad reg\n"); 959 return -EINVAL; 960 } 961 962 if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != AVIVO_D1MODE_VLINE_STAT) { 963 DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n"); 964 return -EINVAL; 965 } 966 967 /* jump over the NOP */ 968 r = r600_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2); 969 if (r) 970 return r; 971 972 h_idx = p->idx - 2; 973 p->idx += wait_reg_mem.count + 2; 974 p->idx += p3reloc.count + 2; 975 976 header = radeon_get_ib_value(p, h_idx); 977 crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1); 978 reg = CP_PACKET0_GET_REG(header); 979 980 obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); 981 if (!obj) { 982 DRM_ERROR("cannot find crtc %d\n", crtc_id); 983 return -EINVAL; 984 } 985 crtc = obj_to_crtc(obj); 986 radeon_crtc = to_radeon_crtc(crtc); 987 crtc_id = radeon_crtc->crtc_id; 988 989 if (!crtc->enabled) { 990 /* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */ 991 ib[h_idx + 2] = PACKET2(0); 992 ib[h_idx + 3] = PACKET2(0); 993 ib[h_idx + 4] = PACKET2(0); 994 ib[h_idx + 5] = PACKET2(0); 995 ib[h_idx + 6] = PACKET2(0); 996 ib[h_idx + 7] = PACKET2(0); 997 ib[h_idx + 8] = PACKET2(0); 998 } else if (crtc_id == 1) { 999 switch (reg) { 1000 case AVIVO_D1MODE_VLINE_START_END: 1001 header &= ~R600_CP_PACKET0_REG_MASK; 1002 header |= AVIVO_D2MODE_VLINE_START_END >> 2; 1003 break; 1004 default: 1005 DRM_ERROR("unknown crtc reloc\n"); 1006 return -EINVAL; 1007 } 1008 ib[h_idx] = header; 1009 ib[h_idx + 4] = AVIVO_D2MODE_VLINE_STATUS >> 2; 1010 } 1011 1012 return 0; 1013} 1014 1015static int r600_packet0_check(struct radeon_cs_parser *p, 1016 struct radeon_cs_packet *pkt, 1017 unsigned idx, unsigned reg) 1018{ 1019 int r; 1020 1021 switch (reg) { 1022 case AVIVO_D1MODE_VLINE_START_END: 1023 r = r600_cs_packet_parse_vline(p); 1024 if (r) { 1025 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1026 idx, reg); 1027 return r; 1028 } 1029 break; 1030 default: 1031 DRM_ERROR("Forbidden register 0x%04X in cs at %d\n", 1032 reg, idx); 1033 return -EINVAL; 1034 } 1035 return 0; 1036} 1037 1038static int r600_cs_parse_packet0(struct radeon_cs_parser *p, 1039 struct radeon_cs_packet *pkt) 1040{ 1041 unsigned reg, i; 1042 unsigned idx; 1043 int r; 1044 1045 idx = pkt->idx + 1; 1046 reg = pkt->reg; 1047 for (i = 0; i <= 
pkt->count; i++, idx++, reg += 4) { 1048 r = r600_packet0_check(p, pkt, idx, reg); 1049 if (r) { 1050 return r; 1051 } 1052 } 1053 return 0; 1054} 1055 1056/** 1057 * r600_cs_check_reg() - check if register is authorized or not 1058 * @parser: parser structure holding parsing context 1059 * @reg: register we are testing 1060 * @idx: index into the cs buffer 1061 * 1062 * This function will test against r600_reg_safe_bm and return 0 1063 * if register is safe. If register is not flag as safe this function 1064 * will test it against a list of register needind special handling. 1065 */ 1066static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) 1067{ 1068 struct r600_cs_track *track = (struct r600_cs_track *)p->track; 1069 struct radeon_cs_reloc *reloc; 1070 u32 m, i, tmp, *ib; 1071 int r; 1072 1073 i = (reg >> 7); 1074 if (i >= ARRAY_SIZE(r600_reg_safe_bm)) { 1075 dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); 1076 return -EINVAL; 1077 } 1078 m = 1 << ((reg >> 2) & 31); 1079 if (!(r600_reg_safe_bm[i] & m)) 1080 return 0; 1081 ib = p->ib.ptr; 1082 switch (reg) { 1083 /* force following reg to 0 in an attempt to disable out buffer 1084 * which will need us to better understand how it works to perform 1085 * security check on it (Jerome) 1086 */ 1087 case R_0288A8_SQ_ESGS_RING_ITEMSIZE: 1088 case R_008C44_SQ_ESGS_RING_SIZE: 1089 case R_0288B0_SQ_ESTMP_RING_ITEMSIZE: 1090 case R_008C54_SQ_ESTMP_RING_SIZE: 1091 case R_0288C0_SQ_FBUF_RING_ITEMSIZE: 1092 case R_008C74_SQ_FBUF_RING_SIZE: 1093 case R_0288B4_SQ_GSTMP_RING_ITEMSIZE: 1094 case R_008C5C_SQ_GSTMP_RING_SIZE: 1095 case R_0288AC_SQ_GSVS_RING_ITEMSIZE: 1096 case R_008C4C_SQ_GSVS_RING_SIZE: 1097 case R_0288BC_SQ_PSTMP_RING_ITEMSIZE: 1098 case R_008C6C_SQ_PSTMP_RING_SIZE: 1099 case R_0288C4_SQ_REDUC_RING_ITEMSIZE: 1100 case R_008C7C_SQ_REDUC_RING_SIZE: 1101 case R_0288B8_SQ_VSTMP_RING_ITEMSIZE: 1102 case R_008C64_SQ_VSTMP_RING_SIZE: 1103 case R_0288C8_SQ_GS_VERT_ITEMSIZE: 1104 /* get value to populate the IB don't remove */ 1105 tmp =radeon_get_ib_value(p, idx); 1106 ib[idx] = 0; 1107 break; 1108 case SQ_CONFIG: 1109 track->sq_config = radeon_get_ib_value(p, idx); 1110 break; 1111 case R_028800_DB_DEPTH_CONTROL: 1112 track->db_depth_control = radeon_get_ib_value(p, idx); 1113 track->db_dirty = true; 1114 break; 1115 case R_028010_DB_DEPTH_INFO: 1116 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) && 1117 r600_cs_packet_next_is_pkt3_nop(p)) { 1118 r = r600_cs_packet_next_reloc(p, &reloc); 1119 if (r) { 1120 dev_warn(p->dev, "bad SET_CONTEXT_REG " 1121 "0x%04X\n", reg); 1122 return -EINVAL; 1123 } 1124 track->db_depth_info = radeon_get_ib_value(p, idx); 1125 ib[idx] &= C_028010_ARRAY_MODE; 1126 track->db_depth_info &= C_028010_ARRAY_MODE; 1127 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { 1128 ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1); 1129 track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1); 1130 } else { 1131 ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1); 1132 track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1); 1133 } 1134 } else { 1135 track->db_depth_info = radeon_get_ib_value(p, idx); 1136 } 1137 track->db_dirty = true; 1138 break; 1139 case R_028004_DB_DEPTH_VIEW: 1140 track->db_depth_view = radeon_get_ib_value(p, idx); 1141 track->db_dirty = true; 1142 break; 1143 case R_028000_DB_DEPTH_SIZE: 1144 track->db_depth_size = radeon_get_ib_value(p, idx); 1145 track->db_depth_size_idx = idx; 1146 track->db_dirty = 
true; 1147 break; 1148 case R_028AB0_VGT_STRMOUT_EN: 1149 track->vgt_strmout_en = radeon_get_ib_value(p, idx); 1150 track->streamout_dirty = true; 1151 break; 1152 case R_028B20_VGT_STRMOUT_BUFFER_EN: 1153 track->vgt_strmout_buffer_en = radeon_get_ib_value(p, idx); 1154 track->streamout_dirty = true; 1155 break; 1156 case VGT_STRMOUT_BUFFER_BASE_0: 1157 case VGT_STRMOUT_BUFFER_BASE_1: 1158 case VGT_STRMOUT_BUFFER_BASE_2: 1159 case VGT_STRMOUT_BUFFER_BASE_3: 1160 r = r600_cs_packet_next_reloc(p, &reloc); 1161 if (r) { 1162 dev_warn(p->dev, "bad SET_CONTEXT_REG " 1163 "0x%04X\n", reg); 1164 return -EINVAL; 1165 } 1166 tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16; 1167 track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8; 1168 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1169 track->vgt_strmout_bo[tmp] = reloc->robj; 1170 track->vgt_strmout_bo_mc[tmp] = reloc->lobj.gpu_offset; 1171 track->streamout_dirty = true; 1172 break; 1173 case VGT_STRMOUT_BUFFER_SIZE_0: 1174 case VGT_STRMOUT_BUFFER_SIZE_1: 1175 case VGT_STRMOUT_BUFFER_SIZE_2: 1176 case VGT_STRMOUT_BUFFER_SIZE_3: 1177 tmp = (reg - VGT_STRMOUT_BUFFER_SIZE_0) / 16; 1178 /* size in register is DWs, convert to bytes */ 1179 track->vgt_strmout_size[tmp] = radeon_get_ib_value(p, idx) * 4; 1180 track->streamout_dirty = true; 1181 break; 1182 case CP_COHER_BASE: 1183 r = r600_cs_packet_next_reloc(p, &reloc); 1184 if (r) { 1185 dev_warn(p->dev, "missing reloc for CP_COHER_BASE " 1186 "0x%04X\n", reg); 1187 return -EINVAL; 1188 } 1189 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1190 break; 1191 case R_028238_CB_TARGET_MASK: 1192 track->cb_target_mask = radeon_get_ib_value(p, idx); 1193 track->cb_dirty = true; 1194 break; 1195 case R_02823C_CB_SHADER_MASK: 1196 track->cb_shader_mask = radeon_get_ib_value(p, idx); 1197 break; 1198 case R_028C04_PA_SC_AA_CONFIG: 1199 tmp = G_028C04_MSAA_NUM_SAMPLES(radeon_get_ib_value(p, idx)); 1200 track->log_nsamples = tmp; 1201 track->nsamples = 1 << tmp; 1202 track->cb_dirty = true; 1203 break; 1204 case R_028808_CB_COLOR_CONTROL: 1205 tmp = G_028808_SPECIAL_OP(radeon_get_ib_value(p, idx)); 1206 track->is_resolve = tmp == V_028808_SPECIAL_RESOLVE_BOX; 1207 track->cb_dirty = true; 1208 break; 1209 case R_0280A0_CB_COLOR0_INFO: 1210 case R_0280A4_CB_COLOR1_INFO: 1211 case R_0280A8_CB_COLOR2_INFO: 1212 case R_0280AC_CB_COLOR3_INFO: 1213 case R_0280B0_CB_COLOR4_INFO: 1214 case R_0280B4_CB_COLOR5_INFO: 1215 case R_0280B8_CB_COLOR6_INFO: 1216 case R_0280BC_CB_COLOR7_INFO: 1217 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) && 1218 r600_cs_packet_next_is_pkt3_nop(p)) { 1219 r = r600_cs_packet_next_reloc(p, &reloc); 1220 if (r) { 1221 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); 1222 return -EINVAL; 1223 } 1224 tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4; 1225 track->cb_color_info[tmp] = radeon_get_ib_value(p, idx); 1226 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { 1227 ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1); 1228 track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1); 1229 } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { 1230 ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1); 1231 track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1); 1232 } 1233 } else { 1234 tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4; 1235 track->cb_color_info[tmp] = radeon_get_ib_value(p, idx); 1236 } 1237 track->cb_dirty = true; 1238 break; 1239 case R_028080_CB_COLOR0_VIEW: 1240 case 
R_028084_CB_COLOR1_VIEW: 1241 case R_028088_CB_COLOR2_VIEW: 1242 case R_02808C_CB_COLOR3_VIEW: 1243 case R_028090_CB_COLOR4_VIEW: 1244 case R_028094_CB_COLOR5_VIEW: 1245 case R_028098_CB_COLOR6_VIEW: 1246 case R_02809C_CB_COLOR7_VIEW: 1247 tmp = (reg - R_028080_CB_COLOR0_VIEW) / 4; 1248 track->cb_color_view[tmp] = radeon_get_ib_value(p, idx); 1249 track->cb_dirty = true; 1250 break; 1251 case R_028060_CB_COLOR0_SIZE: 1252 case R_028064_CB_COLOR1_SIZE: 1253 case R_028068_CB_COLOR2_SIZE: 1254 case R_02806C_CB_COLOR3_SIZE: 1255 case R_028070_CB_COLOR4_SIZE: 1256 case R_028074_CB_COLOR5_SIZE: 1257 case R_028078_CB_COLOR6_SIZE: 1258 case R_02807C_CB_COLOR7_SIZE: 1259 tmp = (reg - R_028060_CB_COLOR0_SIZE) / 4; 1260 track->cb_color_size[tmp] = radeon_get_ib_value(p, idx); 1261 track->cb_color_size_idx[tmp] = idx; 1262 track->cb_dirty = true; 1263 break; 1264 /* This register were added late, there is userspace 1265 * which does provide relocation for those but set 1266 * 0 offset. In order to avoid breaking old userspace 1267 * we detect this and set address to point to last 1268 * CB_COLOR0_BASE, note that if userspace doesn't set 1269 * CB_COLOR0_BASE before this register we will report 1270 * error. Old userspace always set CB_COLOR0_BASE 1271 * before any of this. 1272 */ 1273 case R_0280E0_CB_COLOR0_FRAG: 1274 case R_0280E4_CB_COLOR1_FRAG: 1275 case R_0280E8_CB_COLOR2_FRAG: 1276 case R_0280EC_CB_COLOR3_FRAG: 1277 case R_0280F0_CB_COLOR4_FRAG: 1278 case R_0280F4_CB_COLOR5_FRAG: 1279 case R_0280F8_CB_COLOR6_FRAG: 1280 case R_0280FC_CB_COLOR7_FRAG: 1281 tmp = (reg - R_0280E0_CB_COLOR0_FRAG) / 4; 1282 if (!r600_cs_packet_next_is_pkt3_nop(p)) { 1283 if (!track->cb_color_base_last[tmp]) { 1284 dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg); 1285 return -EINVAL; 1286 } 1287 track->cb_color_frag_bo[tmp] = track->cb_color_bo[tmp]; 1288 track->cb_color_frag_offset[tmp] = track->cb_color_bo_offset[tmp]; 1289 ib[idx] = track->cb_color_base_last[tmp]; 1290 } else { 1291 r = r600_cs_packet_next_reloc(p, &reloc); 1292 if (r) { 1293 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); 1294 return -EINVAL; 1295 } 1296 track->cb_color_frag_bo[tmp] = reloc->robj; 1297 track->cb_color_frag_offset[tmp] = (u64)ib[idx] << 8; 1298 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1299 } 1300 if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) { 1301 track->cb_dirty = true; 1302 } 1303 break; 1304 case R_0280C0_CB_COLOR0_TILE: 1305 case R_0280C4_CB_COLOR1_TILE: 1306 case R_0280C8_CB_COLOR2_TILE: 1307 case R_0280CC_CB_COLOR3_TILE: 1308 case R_0280D0_CB_COLOR4_TILE: 1309 case R_0280D4_CB_COLOR5_TILE: 1310 case R_0280D8_CB_COLOR6_TILE: 1311 case R_0280DC_CB_COLOR7_TILE: 1312 tmp = (reg - R_0280C0_CB_COLOR0_TILE) / 4; 1313 if (!r600_cs_packet_next_is_pkt3_nop(p)) { 1314 if (!track->cb_color_base_last[tmp]) { 1315 dev_err(p->dev, "Broken old userspace ? 
no cb_color0_base supplied before trying to write 0x%08X\n", reg); 1316 return -EINVAL; 1317 } 1318 track->cb_color_tile_bo[tmp] = track->cb_color_bo[tmp]; 1319 track->cb_color_tile_offset[tmp] = track->cb_color_bo_offset[tmp]; 1320 ib[idx] = track->cb_color_base_last[tmp]; 1321 } else { 1322 r = r600_cs_packet_next_reloc(p, &reloc); 1323 if (r) { 1324 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); 1325 return -EINVAL; 1326 } 1327 track->cb_color_tile_bo[tmp] = reloc->robj; 1328 track->cb_color_tile_offset[tmp] = (u64)ib[idx] << 8; 1329 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1330 } 1331 if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) { 1332 track->cb_dirty = true; 1333 } 1334 break; 1335 case R_028100_CB_COLOR0_MASK: 1336 case R_028104_CB_COLOR1_MASK: 1337 case R_028108_CB_COLOR2_MASK: 1338 case R_02810C_CB_COLOR3_MASK: 1339 case R_028110_CB_COLOR4_MASK: 1340 case R_028114_CB_COLOR5_MASK: 1341 case R_028118_CB_COLOR6_MASK: 1342 case R_02811C_CB_COLOR7_MASK: 1343 tmp = (reg - R_028100_CB_COLOR0_MASK) / 4; 1344 track->cb_color_mask[tmp] = radeon_get_ib_value(p, idx); 1345 if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) { 1346 track->cb_dirty = true; 1347 } 1348 break; 1349 case CB_COLOR0_BASE: 1350 case CB_COLOR1_BASE: 1351 case CB_COLOR2_BASE: 1352 case CB_COLOR3_BASE: 1353 case CB_COLOR4_BASE: 1354 case CB_COLOR5_BASE: 1355 case CB_COLOR6_BASE: 1356 case CB_COLOR7_BASE: 1357 r = r600_cs_packet_next_reloc(p, &reloc); 1358 if (r) { 1359 dev_warn(p->dev, "bad SET_CONTEXT_REG " 1360 "0x%04X\n", reg); 1361 return -EINVAL; 1362 } 1363 tmp = (reg - CB_COLOR0_BASE) / 4; 1364 track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8; 1365 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1366 track->cb_color_base_last[tmp] = ib[idx]; 1367 track->cb_color_bo[tmp] = reloc->robj; 1368 track->cb_color_bo_mc[tmp] = reloc->lobj.gpu_offset; 1369 track->cb_dirty = true; 1370 break; 1371 case DB_DEPTH_BASE: 1372 r = r600_cs_packet_next_reloc(p, &reloc); 1373 if (r) { 1374 dev_warn(p->dev, "bad SET_CONTEXT_REG " 1375 "0x%04X\n", reg); 1376 return -EINVAL; 1377 } 1378 track->db_offset = radeon_get_ib_value(p, idx) << 8; 1379 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1380 track->db_bo = reloc->robj; 1381 track->db_bo_mc = reloc->lobj.gpu_offset; 1382 track->db_dirty = true; 1383 break; 1384 case DB_HTILE_DATA_BASE: 1385 r = r600_cs_packet_next_reloc(p, &reloc); 1386 if (r) { 1387 dev_warn(p->dev, "bad SET_CONTEXT_REG " 1388 "0x%04X\n", reg); 1389 return -EINVAL; 1390 } 1391 track->htile_offset = radeon_get_ib_value(p, idx) << 8; 1392 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1393 track->htile_bo = reloc->robj; 1394 track->db_dirty = true; 1395 break; 1396 case DB_HTILE_SURFACE: 1397 track->htile_surface = radeon_get_ib_value(p, idx); 1398 /* force 8x8 htile width and height */ 1399 ib[idx] |= 3; 1400 track->db_dirty = true; 1401 break; 1402 case SQ_PGM_START_FS: 1403 case SQ_PGM_START_ES: 1404 case SQ_PGM_START_VS: 1405 case SQ_PGM_START_GS: 1406 case SQ_PGM_START_PS: 1407 case SQ_ALU_CONST_CACHE_GS_0: 1408 case SQ_ALU_CONST_CACHE_GS_1: 1409 case SQ_ALU_CONST_CACHE_GS_2: 1410 case SQ_ALU_CONST_CACHE_GS_3: 1411 case SQ_ALU_CONST_CACHE_GS_4: 1412 case SQ_ALU_CONST_CACHE_GS_5: 1413 case SQ_ALU_CONST_CACHE_GS_6: 1414 case SQ_ALU_CONST_CACHE_GS_7: 1415 case SQ_ALU_CONST_CACHE_GS_8: 1416 case SQ_ALU_CONST_CACHE_GS_9: 1417 case SQ_ALU_CONST_CACHE_GS_10: 1418 case SQ_ALU_CONST_CACHE_GS_11: 1419 case 
SQ_ALU_CONST_CACHE_GS_12: 1420 case SQ_ALU_CONST_CACHE_GS_13: 1421 case SQ_ALU_CONST_CACHE_GS_14: 1422 case SQ_ALU_CONST_CACHE_GS_15: 1423 case SQ_ALU_CONST_CACHE_PS_0: 1424 case SQ_ALU_CONST_CACHE_PS_1: 1425 case SQ_ALU_CONST_CACHE_PS_2: 1426 case SQ_ALU_CONST_CACHE_PS_3: 1427 case SQ_ALU_CONST_CACHE_PS_4: 1428 case SQ_ALU_CONST_CACHE_PS_5: 1429 case SQ_ALU_CONST_CACHE_PS_6: 1430 case SQ_ALU_CONST_CACHE_PS_7: 1431 case SQ_ALU_CONST_CACHE_PS_8: 1432 case SQ_ALU_CONST_CACHE_PS_9: 1433 case SQ_ALU_CONST_CACHE_PS_10: 1434 case SQ_ALU_CONST_CACHE_PS_11: 1435 case SQ_ALU_CONST_CACHE_PS_12: 1436 case SQ_ALU_CONST_CACHE_PS_13: 1437 case SQ_ALU_CONST_CACHE_PS_14: 1438 case SQ_ALU_CONST_CACHE_PS_15: 1439 case SQ_ALU_CONST_CACHE_VS_0: 1440 case SQ_ALU_CONST_CACHE_VS_1: 1441 case SQ_ALU_CONST_CACHE_VS_2: 1442 case SQ_ALU_CONST_CACHE_VS_3: 1443 case SQ_ALU_CONST_CACHE_VS_4: 1444 case SQ_ALU_CONST_CACHE_VS_5: 1445 case SQ_ALU_CONST_CACHE_VS_6: 1446 case SQ_ALU_CONST_CACHE_VS_7: 1447 case SQ_ALU_CONST_CACHE_VS_8: 1448 case SQ_ALU_CONST_CACHE_VS_9: 1449 case SQ_ALU_CONST_CACHE_VS_10: 1450 case SQ_ALU_CONST_CACHE_VS_11: 1451 case SQ_ALU_CONST_CACHE_VS_12: 1452 case SQ_ALU_CONST_CACHE_VS_13: 1453 case SQ_ALU_CONST_CACHE_VS_14: 1454 case SQ_ALU_CONST_CACHE_VS_15: 1455 r = r600_cs_packet_next_reloc(p, &reloc); 1456 if (r) { 1457 dev_warn(p->dev, "bad SET_CONTEXT_REG " 1458 "0x%04X\n", reg); 1459 return -EINVAL; 1460 } 1461 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1462 break; 1463 case SX_MEMORY_EXPORT_BASE: 1464 r = r600_cs_packet_next_reloc(p, &reloc); 1465 if (r) { 1466 dev_warn(p->dev, "bad SET_CONFIG_REG " 1467 "0x%04X\n", reg); 1468 return -EINVAL; 1469 } 1470 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1471 break; 1472 case SX_MISC: 1473 track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0; 1474 break; 1475 default: 1476 dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); 1477 return -EINVAL; 1478 } 1479 return 0; 1480} 1481 1482unsigned r600_mip_minify(unsigned size, unsigned level) 1483{ 1484 unsigned val; 1485 1486 val = max(1U, size >> level); 1487 if (level > 0) 1488 val = roundup_pow_of_two(val); 1489 return val; 1490} 1491 1492static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel, 1493 unsigned w0, unsigned h0, unsigned d0, unsigned nsamples, unsigned format, 1494 unsigned block_align, unsigned height_align, unsigned base_align, 1495 unsigned *l0_size, unsigned *mipmap_size) 1496{ 1497 unsigned offset, i, level; 1498 unsigned width, height, depth, size; 1499 unsigned blocksize; 1500 unsigned nbx, nby; 1501 unsigned nlevels = llevel - blevel + 1; 1502 1503 *l0_size = -1; 1504 blocksize = r600_fmt_get_blocksize(format); 1505 1506 w0 = r600_mip_minify(w0, 0); 1507 h0 = r600_mip_minify(h0, 0); 1508 d0 = r600_mip_minify(d0, 0); 1509 for(i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) { 1510 width = r600_mip_minify(w0, i); 1511 nbx = r600_fmt_get_nblocksx(format, width); 1512 1513 nbx = roundup(nbx, block_align); 1514 1515 height = r600_mip_minify(h0, i); 1516 nby = r600_fmt_get_nblocksy(format, height); 1517 nby = roundup(nby, height_align); 1518 1519 depth = r600_mip_minify(d0, i); 1520 1521 size = nbx * nby * blocksize * nsamples; 1522 if (nfaces) 1523 size *= nfaces; 1524 else 1525 size *= depth; 1526 1527 if (i == 0) 1528 *l0_size = size; 1529 1530 if (i == 0 || i == 1) 1531 offset = roundup(offset, base_align); 1532 1533 offset += size; 1534 } 1535 *mipmap_size = offset; 1536 if 
(llevel == 0) 1537 *mipmap_size = *l0_size; 1538 if (!blevel) 1539 *mipmap_size -= *l0_size; 1540} 1541 1542/** 1543 * r600_check_texture_resource() - check if register is authorized or not 1544 * @p: parser structure holding parsing context 1545 * @idx: index into the cs buffer 1546 * @texture: texture's bo structure 1547 * @mipmap: mipmap's bo structure 1548 * 1549 * This function will check that the resource has valid field and that 1550 * the texture and mipmap bo object are big enough to cover this resource. 1551 */ 1552static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx, 1553 struct radeon_bo *texture, 1554 struct radeon_bo *mipmap, 1555 u64 base_offset, 1556 u64 mip_offset, 1557 u32 tiling_flags) 1558{ 1559 struct r600_cs_track *track = p->track; 1560 u32 dim, nfaces, llevel, blevel, w0, h0, d0; 1561 u32 word0, word1, l0_size, mipmap_size, word2, word3, word4, word5; 1562 u32 height_align, pitch, pitch_align, depth_align; 1563 u32 barray, larray; 1564 u64 base_align; 1565 struct array_mode_checker array_check; 1566 u32 format; 1567 bool is_array; 1568 1569 /* on legacy kernel we don't perform advanced check */ 1570 if (p->rdev == NULL) 1571 return 0; 1572 1573 /* convert to bytes */ 1574 base_offset <<= 8; 1575 mip_offset <<= 8; 1576 1577 word0 = radeon_get_ib_value(p, idx + 0); 1578 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { 1579 if (tiling_flags & RADEON_TILING_MACRO) 1580 word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1); 1581 else if (tiling_flags & RADEON_TILING_MICRO) 1582 word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1); 1583 } 1584 word1 = radeon_get_ib_value(p, idx + 1); 1585 word2 = radeon_get_ib_value(p, idx + 2) << 8; 1586 word3 = radeon_get_ib_value(p, idx + 3) << 8; 1587 word4 = radeon_get_ib_value(p, idx + 4); 1588 word5 = radeon_get_ib_value(p, idx + 5); 1589 dim = G_038000_DIM(word0); 1590 w0 = G_038000_TEX_WIDTH(word0) + 1; 1591 pitch = (G_038000_PITCH(word0) + 1) * 8; 1592 h0 = G_038004_TEX_HEIGHT(word1) + 1; 1593 d0 = G_038004_TEX_DEPTH(word1); 1594 format = G_038004_DATA_FORMAT(word1); 1595 blevel = G_038010_BASE_LEVEL(word4); 1596 llevel = G_038014_LAST_LEVEL(word5); 1597 /* pitch in texels */ 1598 array_check.array_mode = G_038000_TILE_MODE(word0); 1599 array_check.group_size = track->group_size; 1600 array_check.nbanks = track->nbanks; 1601 array_check.npipes = track->npipes; 1602 array_check.nsamples = 1; 1603 array_check.blocksize = r600_fmt_get_blocksize(format); 1604 nfaces = 1; 1605 is_array = false; 1606 switch (dim) { 1607 case V_038000_SQ_TEX_DIM_1D: 1608 case V_038000_SQ_TEX_DIM_2D: 1609 case V_038000_SQ_TEX_DIM_3D: 1610 break; 1611 case V_038000_SQ_TEX_DIM_CUBEMAP: 1612 if (p->family >= CHIP_RV770) 1613 nfaces = 8; 1614 else 1615 nfaces = 6; 1616 break; 1617 case V_038000_SQ_TEX_DIM_1D_ARRAY: 1618 case V_038000_SQ_TEX_DIM_2D_ARRAY: 1619 is_array = true; 1620 break; 1621 case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA: 1622 is_array = true; 1623 /* fall through */ 1624 case V_038000_SQ_TEX_DIM_2D_MSAA: 1625 array_check.nsamples = 1 << llevel; 1626 llevel = 0; 1627 break; 1628 default: 1629 dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0)); 1630 return -EINVAL; 1631 } 1632 if (!r600_fmt_is_valid_texture(format, p->family)) { 1633 dev_warn(p->dev, "%s:%d texture invalid format %d\n", 1634 __func__, __LINE__, format); 1635 return -EINVAL; 1636 } 1637 1638 if (r600_get_array_mode_alignment(&array_check, 1639 &pitch_align, &height_align, &depth_align, &base_align)) { 1640 
dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n", 1641 __func__, __LINE__, G_038000_TILE_MODE(word0)); 1642 return -EINVAL; 1643 } 1644 1645 /* XXX check height as well... */ 1646 1647 if (!IS_ALIGNED(pitch, pitch_align)) { 1648 dev_warn(p->dev, "%s:%d tex pitch (%d, 0x%x, %d) invalid\n", 1649 __func__, __LINE__, pitch, pitch_align, G_038000_TILE_MODE(word0)); 1650 return -EINVAL; 1651 } 1652 if (!IS_ALIGNED(base_offset, base_align)) { 1653 dev_warn(p->dev, "%s:%d tex base offset (0x%jx, 0x%jx, %d) invalid\n", 1654 __func__, __LINE__, (uintmax_t)base_offset, (uintmax_t)base_align, G_038000_TILE_MODE(word0)); 1655 return -EINVAL; 1656 } 1657 if (!IS_ALIGNED(mip_offset, base_align)) { 1658 dev_warn(p->dev, "%s:%d tex mip offset (0x%jx, 0x%jx, %d) invalid\n", 1659 __func__, __LINE__, (uintmax_t)mip_offset, (uintmax_t)base_align, G_038000_TILE_MODE(word0)); 1660 return -EINVAL; 1661 } 1662 1663 if (blevel > llevel) { 1664 dev_warn(p->dev, "texture blevel %d > llevel %d\n", 1665 blevel, llevel); 1666 } 1667 if (is_array) { 1668 barray = G_038014_BASE_ARRAY(word5); 1669 larray = G_038014_LAST_ARRAY(word5); 1670 1671 nfaces = larray - barray + 1; 1672 } 1673 r600_texture_size(nfaces, blevel, llevel, w0, h0, d0, array_check.nsamples, format, 1674 pitch_align, height_align, base_align, 1675 &l0_size, &mipmap_size); 1676 /* using get ib will give us the offset into the texture bo */ 1677 if ((l0_size + word2) > radeon_bo_size(texture)) { 1678 dev_warn(p->dev, "texture bo too small ((%d %d) (%d %d) %d %d %d -> %d have %ld)\n", 1679 w0, h0, pitch_align, height_align, 1680 array_check.array_mode, format, word2, 1681 l0_size, radeon_bo_size(texture)); 1682 dev_warn(p->dev, "alignments %d %d %d %jd\n", pitch, pitch_align, height_align, (uintmax_t)base_align); 1683 return -EINVAL; 1684 } 1685 /* using get ib will give us the offset into the mipmap bo */ 1686 if ((mipmap_size + word3) > radeon_bo_size(mipmap)) { 1687 /*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n", 1688 w0, h0, format, blevel, nlevels, word3, mipmap_size, radeon_bo_size(texture));*/ 1689 } 1690 return 0; 1691} 1692 1693static bool r600_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) 1694{ 1695 u32 m, i; 1696 1697 i = (reg >> 7); 1698 if (i >= ARRAY_SIZE(r600_reg_safe_bm)) { 1699 dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); 1700 return false; 1701 } 1702 m = 1 << ((reg >> 2) & 31); 1703 if (!(r600_reg_safe_bm[i] & m)) 1704 return true; 1705 dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); 1706 return false; 1707} 1708 1709static int r600_packet3_check(struct radeon_cs_parser *p, 1710 struct radeon_cs_packet *pkt) 1711{ 1712 struct radeon_cs_reloc *reloc; 1713 struct r600_cs_track *track; 1714 volatile u32 *ib; 1715 unsigned idx; 1716 unsigned i; 1717 unsigned start_reg, end_reg, reg; 1718 int r; 1719 u32 idx_value; 1720 1721 track = (struct r600_cs_track *)p->track; 1722 ib = p->ib.ptr; 1723 idx = pkt->idx + 1; 1724 idx_value = radeon_get_ib_value(p, idx); 1725 1726 switch (pkt->opcode) { 1727 case PACKET3_SET_PREDICATION: 1728 { 1729 int pred_op; 1730 int tmp; 1731 uint64_t offset; 1732 1733 if (pkt->count != 1) { 1734 DRM_ERROR("bad SET PREDICATION\n"); 1735 return -EINVAL; 1736 } 1737 1738 tmp = radeon_get_ib_value(p, idx + 1); 1739 pred_op = (tmp >> 16) & 0x7; 1740 1741 /* for the clear predicate operation */ 1742 if (pred_op == 0) 1743 return 0; 1744 1745 if (pred_op > 2) { 1746 DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op); 1747 return 
-EINVAL; 1748 } 1749 1750 r = r600_cs_packet_next_reloc(p, &reloc); 1751 if (r) { 1752 DRM_ERROR("bad SET PREDICATION\n"); 1753 return -EINVAL; 1754 } 1755 1756 offset = reloc->lobj.gpu_offset + 1757 (idx_value & 0xfffffff0) + 1758 ((u64)(tmp & 0xff) << 32); 1759 1760 ib[idx + 0] = offset; 1761 ib[idx + 1] = (tmp & 0xffffff00) | (upper_32_bits(offset) & 0xff); 1762 } 1763 break; 1764 1765 case PACKET3_START_3D_CMDBUF: 1766 if (p->family >= CHIP_RV770 || pkt->count) { 1767 DRM_ERROR("bad START_3D\n"); 1768 return -EINVAL; 1769 } 1770 break; 1771 case PACKET3_CONTEXT_CONTROL: 1772 if (pkt->count != 1) { 1773 DRM_ERROR("bad CONTEXT_CONTROL\n"); 1774 return -EINVAL; 1775 } 1776 break; 1777 case PACKET3_INDEX_TYPE: 1778 case PACKET3_NUM_INSTANCES: 1779 if (pkt->count) { 1780 DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES\n"); 1781 return -EINVAL; 1782 } 1783 break; 1784 case PACKET3_DRAW_INDEX: 1785 { 1786 uint64_t offset; 1787 if (pkt->count != 3) { 1788 DRM_ERROR("bad DRAW_INDEX\n"); 1789 return -EINVAL; 1790 } 1791 r = r600_cs_packet_next_reloc(p, &reloc); 1792 if (r) { 1793 DRM_ERROR("bad DRAW_INDEX\n"); 1794 return -EINVAL; 1795 } 1796 1797 offset = reloc->lobj.gpu_offset + 1798 idx_value + 1799 ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32); 1800 1801 ib[idx+0] = offset; 1802 ib[idx+1] = upper_32_bits(offset) & 0xff; 1803 1804 r = r600_cs_track_check(p); 1805 if (r) { 1806 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); 1807 return r; 1808 } 1809 break; 1810 } 1811 case PACKET3_DRAW_INDEX_AUTO: 1812 if (pkt->count != 1) { 1813 DRM_ERROR("bad DRAW_INDEX_AUTO\n"); 1814 return -EINVAL; 1815 } 1816 r = r600_cs_track_check(p); 1817 if (r) { 1818 dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx); 1819 return r; 1820 } 1821 break; 1822 case PACKET3_DRAW_INDEX_IMMD_BE: 1823 case PACKET3_DRAW_INDEX_IMMD: 1824 if (pkt->count < 2) { 1825 DRM_ERROR("bad DRAW_INDEX_IMMD\n"); 1826 return -EINVAL; 1827 } 1828 r = r600_cs_track_check(p); 1829 if (r) { 1830 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); 1831 return r; 1832 } 1833 break; 1834 case PACKET3_WAIT_REG_MEM: 1835 if (pkt->count != 5) { 1836 DRM_ERROR("bad WAIT_REG_MEM\n"); 1837 return -EINVAL; 1838 } 1839 /* bit 4 is reg (0) or mem (1) */ 1840 if (idx_value & 0x10) { 1841 uint64_t offset; 1842 1843 r = r600_cs_packet_next_reloc(p, &reloc); 1844 if (r) { 1845 DRM_ERROR("bad WAIT_REG_MEM\n"); 1846 return -EINVAL; 1847 } 1848 1849 offset = reloc->lobj.gpu_offset + 1850 (radeon_get_ib_value(p, idx+1) & 0xfffffff0) + 1851 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); 1852 1853 ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffff0); 1854 ib[idx+2] = upper_32_bits(offset) & 0xff; 1855 } 1856 break; 1857 case PACKET3_CP_DMA: 1858 { 1859 u32 command, size; 1860 u64 offset, tmp; 1861 if (pkt->count != 4) { 1862 DRM_ERROR("bad CP DMA\n"); 1863 return -EINVAL; 1864 } 1865 command = radeon_get_ib_value(p, idx+4); 1866 size = command & 0x1fffff; 1867 if (command & PACKET3_CP_DMA_CMD_SAS) { 1868 /* src address space is register */ 1869 DRM_ERROR("CP DMA SAS not supported\n"); 1870 return -EINVAL; 1871 } else { 1872 if (command & PACKET3_CP_DMA_CMD_SAIC) { 1873 DRM_ERROR("CP DMA SAIC only supported for registers\n"); 1874 return -EINVAL; 1875 } 1876 /* src address space is memory */ 1877 r = r600_cs_packet_next_reloc(p, &reloc); 1878 if (r) { 1879 DRM_ERROR("bad CP DMA SRC\n"); 1880 return -EINVAL; 1881 } 1882 1883 tmp = radeon_get_ib_value(p, idx) + 1884 ((u64)(radeon_get_ib_value(p, idx+1) 
& 0xff) << 32); 1885 1886 offset = reloc->lobj.gpu_offset + tmp; 1887 1888 if ((tmp + size) > radeon_bo_size(reloc->robj)) { 1889 dev_warn(p->dev, "CP DMA src buffer too small (%ju %lu)\n", 1890 (uintmax_t)tmp + size, radeon_bo_size(reloc->robj)); 1891 return -EINVAL; 1892 } 1893 1894 ib[idx] = offset; 1895 ib[idx+1] = (ib[idx+1] & 0xffffff00) | (upper_32_bits(offset) & 0xff); 1896 } 1897 if (command & PACKET3_CP_DMA_CMD_DAS) { 1898 /* dst address space is register */ 1899 DRM_ERROR("CP DMA DAS not supported\n"); 1900 return -EINVAL; 1901 } else { 1902 /* dst address space is memory */ 1903 if (command & PACKET3_CP_DMA_CMD_DAIC) { 1904 DRM_ERROR("CP DMA DAIC only supported for registers\n"); 1905 return -EINVAL; 1906 } 1907 r = r600_cs_packet_next_reloc(p, &reloc); 1908 if (r) { 1909 DRM_ERROR("bad CP DMA DST\n"); 1910 return -EINVAL; 1911 } 1912 1913 tmp = radeon_get_ib_value(p, idx+2) + 1914 ((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32); 1915 1916 offset = reloc->lobj.gpu_offset + tmp; 1917 1918 if ((tmp + size) > radeon_bo_size(reloc->robj)) { 1919 dev_warn(p->dev, "CP DMA dst buffer too small (%ju %lu)\n", 1920 (uintmax_t)tmp + size, radeon_bo_size(reloc->robj)); 1921 return -EINVAL; 1922 } 1923 1924 ib[idx+2] = offset; 1925 ib[idx+3] = upper_32_bits(offset) & 0xff; 1926 } 1927 break; 1928 } 1929 case PACKET3_SURFACE_SYNC: 1930 if (pkt->count != 3) { 1931 DRM_ERROR("bad SURFACE_SYNC\n"); 1932 return -EINVAL; 1933 } 1934 /* 0xffffffff/0x0 is flush all cache flag */ 1935 if (radeon_get_ib_value(p, idx + 1) != 0xffffffff || 1936 radeon_get_ib_value(p, idx + 2) != 0) { 1937 r = r600_cs_packet_next_reloc(p, &reloc); 1938 if (r) { 1939 DRM_ERROR("bad SURFACE_SYNC\n"); 1940 return -EINVAL; 1941 } 1942 ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1943 } 1944 break; 1945 case PACKET3_EVENT_WRITE: 1946 if (pkt->count != 2 && pkt->count != 0) { 1947 DRM_ERROR("bad EVENT_WRITE\n"); 1948 return -EINVAL; 1949 } 1950 if (pkt->count) { 1951 uint64_t offset; 1952 1953 r = r600_cs_packet_next_reloc(p, &reloc); 1954 if (r) { 1955 DRM_ERROR("bad EVENT_WRITE\n"); 1956 return -EINVAL; 1957 } 1958 offset = reloc->lobj.gpu_offset + 1959 (radeon_get_ib_value(p, idx+1) & 0xfffffff8) + 1960 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); 1961 1962 ib[idx+1] = offset & 0xfffffff8; 1963 ib[idx+2] = upper_32_bits(offset) & 0xff; 1964 } 1965 break; 1966 case PACKET3_EVENT_WRITE_EOP: 1967 { 1968 uint64_t offset; 1969 1970 if (pkt->count != 4) { 1971 DRM_ERROR("bad EVENT_WRITE_EOP\n"); 1972 return -EINVAL; 1973 } 1974 r = r600_cs_packet_next_reloc(p, &reloc); 1975 if (r) { 1976 DRM_ERROR("bad EVENT_WRITE\n"); 1977 return -EINVAL; 1978 } 1979 1980 offset = reloc->lobj.gpu_offset + 1981 (radeon_get_ib_value(p, idx+1) & 0xfffffffc) + 1982 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); 1983 1984 ib[idx+1] = offset & 0xfffffffc; 1985 ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff); 1986 break; 1987 } 1988 case PACKET3_SET_CONFIG_REG: 1989 start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_OFFSET; 1990 end_reg = 4 * pkt->count + start_reg - 4; 1991 if ((start_reg < PACKET3_SET_CONFIG_REG_OFFSET) || 1992 (start_reg >= PACKET3_SET_CONFIG_REG_END) || 1993 (end_reg >= PACKET3_SET_CONFIG_REG_END)) { 1994 DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n"); 1995 return -EINVAL; 1996 } 1997 for (i = 0; i < pkt->count; i++) { 1998 reg = start_reg + (4 * i); 1999 r = r600_cs_check_reg(p, reg, idx+1+i); 2000 if (r) 2001 return r; 2002 } 2003 break; 2004 case PACKET3_SET_CONTEXT_REG: 
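/* SET_CONTEXT_REG: the first body dword is a dword offset from PACKET3_SET_CONTEXT_REG_OFFSET; each of the pkt->count payload dwords programs start_reg + 4*i and must pass r600_cs_check_reg(). */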
2005 start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_OFFSET; 2006 end_reg = 4 * pkt->count + start_reg - 4; 2007 if ((start_reg < PACKET3_SET_CONTEXT_REG_OFFSET) || 2008 (start_reg >= PACKET3_SET_CONTEXT_REG_END) || 2009 (end_reg >= PACKET3_SET_CONTEXT_REG_END)) { 2010 DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n"); 2011 return -EINVAL; 2012 } 2013 for (i = 0; i < pkt->count; i++) { 2014 reg = start_reg + (4 * i); 2015 r = r600_cs_check_reg(p, reg, idx+1+i); 2016 if (r) 2017 return r; 2018 } 2019 break; 2020 case PACKET3_SET_RESOURCE: 2021 if (pkt->count % 7) { 2022 DRM_ERROR("bad SET_RESOURCE\n"); 2023 return -EINVAL; 2024 } 2025 start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_OFFSET; 2026 end_reg = 4 * pkt->count + start_reg - 4; 2027 if ((start_reg < PACKET3_SET_RESOURCE_OFFSET) || 2028 (start_reg >= PACKET3_SET_RESOURCE_END) || 2029 (end_reg >= PACKET3_SET_RESOURCE_END)) { 2030 DRM_ERROR("bad SET_RESOURCE\n"); 2031 return -EINVAL; 2032 } 2033 for (i = 0; i < (pkt->count / 7); i++) { 2034 struct radeon_bo *texture, *mipmap; 2035 u32 size, offset, base_offset, mip_offset; 2036 2037 switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) { 2038 case SQ_TEX_VTX_VALID_TEXTURE: 2039 /* tex base */ 2040 r = r600_cs_packet_next_reloc(p, &reloc); 2041 if (r) { 2042 DRM_ERROR("bad SET_RESOURCE\n"); 2043 return -EINVAL; 2044 } 2045 base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 2046 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { 2047 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) 2048 ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1); 2049 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) 2050 ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1); 2051 } 2052 texture = reloc->robj; 2053 /* tex mip base */ 2054 r = r600_cs_packet_next_reloc(p, &reloc); 2055 if (r) { 2056 DRM_ERROR("bad SET_RESOURCE\n"); 2057 return -EINVAL; 2058 } 2059 mip_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 2060 mipmap = reloc->robj; 2061 r = r600_check_texture_resource(p, idx+(i*7)+1, 2062 texture, mipmap, 2063 base_offset + radeon_get_ib_value(p, idx+1+(i*7)+2), 2064 mip_offset + radeon_get_ib_value(p, idx+1+(i*7)+3), 2065 reloc->lobj.tiling_flags); 2066 if (r) 2067 return r; 2068 ib[idx+1+(i*7)+2] += base_offset; 2069 ib[idx+1+(i*7)+3] += mip_offset; 2070 break; 2071 case SQ_TEX_VTX_VALID_BUFFER: 2072 { 2073 uint64_t offset64; 2074 /* vtx base */ 2075 r = r600_cs_packet_next_reloc(p, &reloc); 2076 if (r) { 2077 DRM_ERROR("bad SET_RESOURCE\n"); 2078 return -EINVAL; 2079 } 2080 offset = radeon_get_ib_value(p, idx+1+(i*7)+0); 2081 size = radeon_get_ib_value(p, idx+1+(i*7)+1) + 1; 2082 if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) { 2083 /* force size to size of the buffer */ 2084 dev_warn(p->dev, "vbo resource seems too big (%d) for the bo (%ld)\n", 2085 size + offset, radeon_bo_size(reloc->robj)); 2086 ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj) - offset; 2087 } 2088 2089 offset64 = reloc->lobj.gpu_offset + offset; 2090 ib[idx+1+(i*8)+0] = offset64; 2091 ib[idx+1+(i*8)+2] = (ib[idx+1+(i*8)+2] & 0xffffff00) | 2092 (upper_32_bits(offset64) & 0xff); 2093 break; 2094 } 2095 case SQ_TEX_VTX_INVALID_TEXTURE: 2096 case SQ_TEX_VTX_INVALID_BUFFER: 2097 default: 2098 DRM_ERROR("bad SET_RESOURCE\n"); 2099 return -EINVAL; 2100 } 2101 } 2102 break; 2103 case PACKET3_SET_ALU_CONST: 2104 if (track->sq_config & DX9_CONSTS) { 2105 start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET; 2106 end_reg = 4 
* pkt->count + start_reg - 4; 2107 if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) || 2108 (start_reg >= PACKET3_SET_ALU_CONST_END) || 2109 (end_reg >= PACKET3_SET_ALU_CONST_END)) { 2110 DRM_ERROR("bad SET_ALU_CONST\n"); 2111 return -EINVAL; 2112 } 2113 } 2114 break; 2115 case PACKET3_SET_BOOL_CONST: 2116 start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_OFFSET; 2117 end_reg = 4 * pkt->count + start_reg - 4; 2118 if ((start_reg < PACKET3_SET_BOOL_CONST_OFFSET) || 2119 (start_reg >= PACKET3_SET_BOOL_CONST_END) || 2120 (end_reg >= PACKET3_SET_BOOL_CONST_END)) { 2121 DRM_ERROR("bad SET_BOOL_CONST\n"); 2122 return -EINVAL; 2123 } 2124 break; 2125 case PACKET3_SET_LOOP_CONST: 2126 start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_OFFSET; 2127 end_reg = 4 * pkt->count + start_reg - 4; 2128 if ((start_reg < PACKET3_SET_LOOP_CONST_OFFSET) || 2129 (start_reg >= PACKET3_SET_LOOP_CONST_END) || 2130 (end_reg >= PACKET3_SET_LOOP_CONST_END)) { 2131 DRM_ERROR("bad SET_LOOP_CONST\n"); 2132 return -EINVAL; 2133 } 2134 break; 2135 case PACKET3_SET_CTL_CONST: 2136 start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_OFFSET; 2137 end_reg = 4 * pkt->count + start_reg - 4; 2138 if ((start_reg < PACKET3_SET_CTL_CONST_OFFSET) || 2139 (start_reg >= PACKET3_SET_CTL_CONST_END) || 2140 (end_reg >= PACKET3_SET_CTL_CONST_END)) { 2141 DRM_ERROR("bad SET_CTL_CONST\n"); 2142 return -EINVAL; 2143 } 2144 break; 2145 case PACKET3_SET_SAMPLER: 2146 if (pkt->count % 3) { 2147 DRM_ERROR("bad SET_SAMPLER\n"); 2148 return -EINVAL; 2149 } 2150 start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_OFFSET; 2151 end_reg = 4 * pkt->count + start_reg - 4; 2152 if ((start_reg < PACKET3_SET_SAMPLER_OFFSET) || 2153 (start_reg >= PACKET3_SET_SAMPLER_END) || 2154 (end_reg >= PACKET3_SET_SAMPLER_END)) { 2155 DRM_ERROR("bad SET_SAMPLER\n"); 2156 return -EINVAL; 2157 } 2158 break; 2159 case PACKET3_STRMOUT_BASE_UPDATE: 2160 /* RS780 and RS880 also need this */ 2161 if (p->family < CHIP_RS780) { 2162 DRM_ERROR("STRMOUT_BASE_UPDATE only supported on 7xx\n"); 2163 return -EINVAL; 2164 } 2165 if (pkt->count != 1) { 2166 DRM_ERROR("bad STRMOUT_BASE_UPDATE packet count\n"); 2167 return -EINVAL; 2168 } 2169 if (idx_value > 3) { 2170 DRM_ERROR("bad STRMOUT_BASE_UPDATE index\n"); 2171 return -EINVAL; 2172 } 2173 { 2174 u64 offset; 2175 2176 r = r600_cs_packet_next_reloc(p, &reloc); 2177 if (r) { 2178 DRM_ERROR("bad STRMOUT_BASE_UPDATE reloc\n"); 2179 return -EINVAL; 2180 } 2181 2182 if (reloc->robj != track->vgt_strmout_bo[idx_value]) { 2183 DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo does not match\n"); 2184 return -EINVAL; 2185 } 2186 2187 offset = radeon_get_ib_value(p, idx+1) << 8; 2188 if (offset != track->vgt_strmout_bo_offset[idx_value]) { 2189 DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo offset does not match: 0x%jx, 0x%x\n", 2190 (uintmax_t)offset, track->vgt_strmout_bo_offset[idx_value]); 2191 return -EINVAL; 2192 } 2193 2194 if ((offset + 4) > radeon_bo_size(reloc->robj)) { 2195 DRM_ERROR("bad STRMOUT_BASE_UPDATE bo too small: 0x%jx, 0x%lx\n", 2196 (uintmax_t)offset + 4, radeon_bo_size(reloc->robj)); 2197 return -EINVAL; 2198 } 2199 ib[idx+1] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 2200 } 2201 break; 2202 case PACKET3_SURFACE_BASE_UPDATE: 2203 if (p->family >= CHIP_RV770 || p->family == CHIP_R600) { 2204 DRM_ERROR("bad SURFACE_BASE_UPDATE\n"); 2205 return -EINVAL; 2206 } 2207 if (pkt->count) { 2208 DRM_ERROR("bad SURFACE_BASE_UPDATE\n"); 2209 return -EINVAL; 2210 } 2211 break; 2212 case PACKET3_STRMOUT_BUFFER_UPDATE: 2213 if 
(pkt->count != 4) { 2214 DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (invalid count)\n"); 2215 return -EINVAL; 2216 } 2217 /* Updating memory at DST_ADDRESS. */ 2218 if (idx_value & 0x1) { 2219 u64 offset; 2220 r = r600_cs_packet_next_reloc(p, &reloc); 2221 if (r) { 2222 DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n"); 2223 return -EINVAL; 2224 } 2225 offset = radeon_get_ib_value(p, idx+1); 2226 offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32; 2227 if ((offset + 4) > radeon_bo_size(reloc->robj)) { 2228 DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%jx, 0x%lx\n", 2229 (uintmax_t)offset + 4, radeon_bo_size(reloc->robj)); 2230 return -EINVAL; 2231 } 2232 offset += reloc->lobj.gpu_offset; 2233 ib[idx+1] = offset; 2234 ib[idx+2] = upper_32_bits(offset) & 0xff; 2235 } 2236 /* Reading data from SRC_ADDRESS. */ 2237 if (((idx_value >> 1) & 0x3) == 2) { 2238 u64 offset; 2239 r = r600_cs_packet_next_reloc(p, &reloc); 2240 if (r) { 2241 DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n"); 2242 return -EINVAL; 2243 } 2244 offset = radeon_get_ib_value(p, idx+3); 2245 offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32; 2246 if ((offset + 4) > radeon_bo_size(reloc->robj)) { 2247 DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%jx, 0x%lx\n", 2248 (uintmax_t)offset + 4, radeon_bo_size(reloc->robj)); 2249 return -EINVAL; 2250 } 2251 offset += reloc->lobj.gpu_offset; 2252 ib[idx+3] = offset; 2253 ib[idx+4] = upper_32_bits(offset) & 0xff; 2254 } 2255 break; 2256 case PACKET3_MEM_WRITE: 2257 { 2258 u64 offset; 2259 2260 if (pkt->count != 3) { 2261 DRM_ERROR("bad MEM_WRITE (invalid count)\n"); 2262 return -EINVAL; 2263 } 2264 r = r600_cs_packet_next_reloc(p, &reloc); 2265 if (r) { 2266 DRM_ERROR("bad MEM_WRITE (missing reloc)\n"); 2267 return -EINVAL; 2268 } 2269 offset = radeon_get_ib_value(p, idx+0); 2270 offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL; 2271 if (offset & 0x7) { 2272 DRM_ERROR("bad MEM_WRITE (address not qwords aligned)\n"); 2273 return -EINVAL; 2274 } 2275 if ((offset + 8) > radeon_bo_size(reloc->robj)) { 2276 DRM_ERROR("bad MEM_WRITE bo too small: 0x%jx, 0x%lx\n", 2277 (uintmax_t)offset + 8, radeon_bo_size(reloc->robj)); 2278 return -EINVAL; 2279 } 2280 offset += reloc->lobj.gpu_offset; 2281 ib[idx+0] = offset; 2282 ib[idx+1] = upper_32_bits(offset) & 0xff; 2283 break; 2284 } 2285 case PACKET3_COPY_DW: 2286 if (pkt->count != 4) { 2287 DRM_ERROR("bad COPY_DW (invalid count)\n"); 2288 return -EINVAL; 2289 } 2290 if (idx_value & 0x1) { 2291 u64 offset; 2292 /* SRC is memory. */ 2293 r = r600_cs_packet_next_reloc(p, &reloc); 2294 if (r) { 2295 DRM_ERROR("bad COPY_DW (missing src reloc)\n"); 2296 return -EINVAL; 2297 } 2298 offset = radeon_get_ib_value(p, idx+1); 2299 offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32; 2300 if ((offset + 4) > radeon_bo_size(reloc->robj)) { 2301 DRM_ERROR("bad COPY_DW src bo too small: 0x%jx, 0x%lx\n", 2302 (uintmax_t)offset + 4, radeon_bo_size(reloc->robj)); 2303 return -EINVAL; 2304 } 2305 offset += reloc->lobj.gpu_offset; 2306 ib[idx+1] = offset; 2307 ib[idx+2] = upper_32_bits(offset) & 0xff; 2308 } else { 2309 /* SRC is a reg. */ 2310 reg = radeon_get_ib_value(p, idx+1) << 2; 2311 if (!r600_is_safe_reg(p, reg, idx+1)) 2312 return -EINVAL; 2313 } 2314 if (idx_value & 0x2) { 2315 u64 offset; 2316 /* DST is memory. 
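   The 64-bit destination address in dwords idx+3/idx+4 is checked against the reloc bo's size and then rebased onto its GPU offset below.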
*/ 2317 r = r600_cs_packet_next_reloc(p, &reloc); 2318 if (r) { 2319 DRM_ERROR("bad COPY_DW (missing dst reloc)\n"); 2320 return -EINVAL; 2321 } 2322 offset = radeon_get_ib_value(p, idx+3); 2323 offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32; 2324 if ((offset + 4) > radeon_bo_size(reloc->robj)) { 2325 DRM_ERROR("bad COPY_DW dst bo too small: 0x%jx, 0x%lx\n", 2326 (uintmax_t)offset + 4, radeon_bo_size(reloc->robj)); 2327 return -EINVAL; 2328 } 2329 offset += reloc->lobj.gpu_offset; 2330 ib[idx+3] = offset; 2331 ib[idx+4] = upper_32_bits(offset) & 0xff; 2332 } else { 2333 /* DST is a reg. */ 2334 reg = radeon_get_ib_value(p, idx+3) << 2; 2335 if (!r600_is_safe_reg(p, reg, idx+3)) 2336 return -EINVAL; 2337 } 2338 break; 2339 case PACKET3_NOP: 2340 break; 2341 default: 2342 DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode); 2343 return -EINVAL; 2344 } 2345 return 0; 2346} 2347 2348int r600_cs_parse(struct radeon_cs_parser *p) 2349{ 2350 struct radeon_cs_packet pkt; 2351 struct r600_cs_track *track; 2352 int r; 2353 2354 if (p->track == NULL) { 2355 /* initialize tracker, we are in kms */ 2356 track = malloc(sizeof(*track), 2357 DRM_MEM_DRIVER, M_NOWAIT | M_ZERO); 2358 if (track == NULL) 2359 return -ENOMEM; 2360 r600_cs_track_init(track); 2361 if (p->rdev->family < CHIP_RV770) { 2362 track->npipes = p->rdev->config.r600.tiling_npipes; 2363 track->nbanks = p->rdev->config.r600.tiling_nbanks; 2364 track->group_size = p->rdev->config.r600.tiling_group_size; 2365 } else if (p->rdev->family <= CHIP_RV740) { 2366 track->npipes = p->rdev->config.rv770.tiling_npipes; 2367 track->nbanks = p->rdev->config.rv770.tiling_nbanks; 2368 track->group_size = p->rdev->config.rv770.tiling_group_size; 2369 } 2370 p->track = track; 2371 } 2372 do { 2373 r = r600_cs_packet_parse(p, &pkt, p->idx); 2374 if (r) { 2375 free(p->track, DRM_MEM_DRIVER); 2376 p->track = NULL; 2377 return r; 2378 } 2379 p->idx += pkt.count + 2; 2380 switch (pkt.type) { 2381 case PACKET_TYPE0: 2382 r = r600_cs_parse_packet0(p, &pkt); 2383 break; 2384 case PACKET_TYPE2: 2385 break; 2386 case PACKET_TYPE3: 2387 r = r600_packet3_check(p, &pkt); 2388 break; 2389 default: 2390 DRM_ERROR("Unknown packet type %d !\n", pkt.type); 2391 free(p->track, DRM_MEM_DRIVER); 2392 p->track = NULL; 2393 return -EINVAL; 2394 } 2395 if (r) { 2396 free(p->track, DRM_MEM_DRIVER); 2397 p->track = NULL; 2398 return r; 2399 } 2400 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); 2401#if 0 2402 for (r = 0; r < p->ib.length_dw; r++) { 2403 DRM_INFO("%05d 0x%08X\n", r, p->ib.ptr[r]); 2404 mdelay(1); 2405 } 2406#endif 2407 free(p->track, DRM_MEM_DRIVER); 2408 p->track = NULL; 2409 return 0; 2410} 2411 2412/* 2413 * DMA 2414 */ 2415/** 2416 * r600_dma_cs_next_reloc() - parse next reloc 2417 * @p: parser structure holding parsing context. 2418 * @cs_reloc: reloc informations 2419 * 2420 * Return the next reloc, do bo validation and compute 2421 * GPU offset using the provided start. 
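 * Returns 0 for success and an error on failure.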
2422 **/ 2423int r600_dma_cs_next_reloc(struct radeon_cs_parser *p, 2424 struct radeon_cs_reloc **cs_reloc) 2425{ 2426 struct radeon_cs_chunk *relocs_chunk; 2427 unsigned idx; 2428 2429 *cs_reloc = NULL; 2430 if (p->chunk_relocs_idx == -1) { 2431 DRM_ERROR("No relocation chunk !\n"); 2432 return -EINVAL; 2433 } 2434 relocs_chunk = &p->chunks[p->chunk_relocs_idx]; 2435 idx = p->dma_reloc_idx; 2436 if (idx >= p->nrelocs) { 2437 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", 2438 idx, p->nrelocs); 2439 return -EINVAL; 2440 } 2441 *cs_reloc = p->relocs_ptr[idx]; 2442 p->dma_reloc_idx++; 2443 return 0; 2444} 2445 2446#define GET_DMA_CMD(h) (((h) & 0xf0000000) >> 28) 2447#define GET_DMA_COUNT(h) ((h) & 0x0000ffff) 2448#define GET_DMA_T(h) (((h) & 0x00800000) >> 23) 2449 2450/** 2451 * r600_dma_cs_parse() - parse the DMA IB 2452 * @p: parser structure holding parsing context. 2453 * 2454 * Parses the DMA IB from the CS ioctl and updates 2455 * the GPU addresses based on the reloc information and 2456 * checks for errors. (R6xx-R7xx) 2457 * Returns 0 for success and an error on failure. 2458 **/ 2459int r600_dma_cs_parse(struct radeon_cs_parser *p) 2460{ 2461 struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx]; 2462 struct radeon_cs_reloc *src_reloc, *dst_reloc; 2463 u32 header, cmd, count, tiled; 2464 volatile u32 *ib = p->ib.ptr; 2465 u32 idx, idx_value; 2466 u64 src_offset, dst_offset; 2467 int r; 2468 2469 do { 2470 if (p->idx >= ib_chunk->length_dw) { 2471 DRM_ERROR("Can not parse packet at %d after CS end %d !\n", 2472 p->idx, ib_chunk->length_dw); 2473 return -EINVAL; 2474 } 2475 idx = p->idx; 2476 header = radeon_get_ib_value(p, idx); 2477 cmd = GET_DMA_CMD(header); 2478 count = GET_DMA_COUNT(header); 2479 tiled = GET_DMA_T(header); 2480 2481 switch (cmd) { 2482 case DMA_PACKET_WRITE: 2483 r = r600_dma_cs_next_reloc(p, &dst_reloc); 2484 if (r) { 2485 DRM_ERROR("bad DMA_PACKET_WRITE\n"); 2486 return -EINVAL; 2487 } 2488 if (tiled) { 2489 dst_offset = radeon_get_ib_value(p, idx+1); 2490 dst_offset <<= 8; 2491 2492 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); 2493 p->idx += count + 5; 2494 } else { 2495 dst_offset = radeon_get_ib_value(p, idx+1); 2496 dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32; 2497 2498 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); 2499 ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; 2500 p->idx += count + 3; 2501 } 2502 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { 2503 dev_warn(p->dev, "DMA write buffer too small (%ju %lu)\n", 2504 (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); 2505 return -EINVAL; 2506 } 2507 break; 2508 case DMA_PACKET_COPY: 2509 r = r600_dma_cs_next_reloc(p, &src_reloc); 2510 if (r) { 2511 DRM_ERROR("bad DMA_PACKET_COPY\n"); 2512 return -EINVAL; 2513 } 2514 r = r600_dma_cs_next_reloc(p, &dst_reloc); 2515 if (r) { 2516 DRM_ERROR("bad DMA_PACKET_COPY\n"); 2517 return -EINVAL; 2518 } 2519 if (tiled) { 2520 idx_value = radeon_get_ib_value(p, idx + 2); 2521 /* detile bit */ 2522 if (idx_value & (1U << 31)) { 2523 /* tiled src, linear dst */ 2524 src_offset = radeon_get_ib_value(p, idx+1); 2525 src_offset <<= 8; 2526 ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8); 2527 2528 dst_offset = radeon_get_ib_value(p, idx+5); 2529 dst_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32; 2530 ib[idx+5] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); 2531 ib[idx+6] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; 2532 
} else { 2533 /* linear src, tiled dst */ 2534 src_offset = radeon_get_ib_value(p, idx+5); 2535 src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32; 2536 ib[idx+5] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); 2537 ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; 2538 2539 dst_offset = radeon_get_ib_value(p, idx+1); 2540 dst_offset <<= 8; 2541 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); 2542 } 2543 p->idx += 7; 2544 } else { 2545 if (p->family >= CHIP_RV770) { 2546 src_offset = radeon_get_ib_value(p, idx+2); 2547 src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32; 2548 dst_offset = radeon_get_ib_value(p, idx+1); 2549 dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32; 2550 2551 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); 2552 ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); 2553 ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; 2554 ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; 2555 p->idx += 5; 2556 } else { 2557 src_offset = radeon_get_ib_value(p, idx+2); 2558 src_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32; 2559 dst_offset = radeon_get_ib_value(p, idx+1); 2560 dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff0000)) << 16; 2561 2562 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); 2563 ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); 2564 ib[idx+3] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; 2565 ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff) << 16; 2566 p->idx += 4; 2567 } 2568 } 2569 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { 2570 dev_warn(p->dev, "DMA copy src buffer too small (%ju %lu)\n", 2571 (uintmax_t)src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); 2572 return -EINVAL; 2573 } 2574 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { 2575 dev_warn(p->dev, "DMA copy dst buffer too small (%ju %lu)\n", 2576 (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); 2577 return -EINVAL; 2578 } 2579 break; 2580 case DMA_PACKET_CONSTANT_FILL: 2581 if (p->family < CHIP_RV770) { 2582 DRM_ERROR("Constant Fill is 7xx only !\n"); 2583 return -EINVAL; 2584 } 2585 r = r600_dma_cs_next_reloc(p, &dst_reloc); 2586 if (r) { 2587 DRM_ERROR("bad DMA_PACKET_CONSTANT_FILL\n"); 2588 return -EINVAL; 2589 } 2590 dst_offset = radeon_get_ib_value(p, idx+1); 2591 dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16; 2592 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { 2593 dev_warn(p->dev, "DMA constant fill buffer too small (%ju %lu)\n", 2594 (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); 2595 return -EINVAL; 2596 } 2597 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); 2598 ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) << 16) & 0x00ff0000; 2599 p->idx += 4; 2600 break; 2601 case DMA_PACKET_NOP: 2602 p->idx += 1; 2603 break; 2604 default: 2605 DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx); 2606 return -EINVAL; 2607 } 2608 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); 2609#if 0 2610 for (r = 0; r < p->ib.length_dw; r++) { 2611 DRM_INFO("%05d 0x%08X\n", r, p->ib.ptr[r]); 2612 mdelay(1); 2613 } 2614#endif 2615 return 0; 2616} 2617