/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

int radeon_debugfs_ib_init(struct radeon_device *rdev);

void radeon_ib_bogus_cleanup(struct radeon_device *rdev)
{
	struct radeon_ib *ib, *n;

	list_for_each_entry_safe(ib, n, &rdev->ib_pool.bogus_ib, list) {
		list_del(&ib->list);
		vfree(ib->ptr);
		kfree(ib);
	}
}

void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ib *bib;

	bib = kmalloc(sizeof(*bib), GFP_KERNEL);
	if (bib == NULL)
		return;
	bib->ptr = vmalloc(ib->length_dw * 4);
	if (bib->ptr == NULL) {
		kfree(bib);
		return;
	}
	memcpy(bib->ptr, ib->ptr, ib->length_dw * 4);
	bib->length_dw = ib->length_dw;
	mutex_lock(&rdev->ib_pool.mutex);
	list_add_tail(&bib->list, &rdev->ib_pool.bogus_ib);
	mutex_unlock(&rdev->ib_pool.mutex);
}
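/*
 * The "bogus IB" list above is a debugging aid: radeon_ib_bogus_add()
 * stores a vmalloc'ed snapshot of a problematic IB on
 * rdev->ib_pool.bogus_ib so that it can later be dumped (and freed)
 * through the "radeon_ib_bogus" debugfs file registered below.
 * Illustrative sketch of a hypothetical caller, where
 * radeon_ib_looks_bad() is made up for the example:
 *
 *	if (radeon_ib_looks_bad(rdev, ib))
 *		radeon_ib_bogus_add(rdev, ib);
 */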
/*
 * IB.
 */
int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
{
	struct radeon_fence *fence;
	struct radeon_ib *nib;
	int r = 0, i, c;

	*ib = NULL;
	r = radeon_fence_create(rdev, &fence);
	if (r) {
		dev_err(rdev->dev, "failed to create fence for new IB\n");
		return r;
	}
	mutex_lock(&rdev->ib_pool.mutex);
	for (i = rdev->ib_pool.head_id, c = 0, nib = NULL; c < RADEON_IB_POOL_SIZE; c++, i++) {
		i &= (RADEON_IB_POOL_SIZE - 1);
		if (rdev->ib_pool.ibs[i].free) {
			nib = &rdev->ib_pool.ibs[i];
			break;
		}
	}
	if (nib == NULL) {
		/* This should never happen: it means we allocated all
		 * IBs and haven't scheduled one yet. Return EBUSY to
		 * userspace, hoping that a retry of the ioctl has
		 * better luck.
		 */
		dev_err(rdev->dev, "no free indirect buffer!\n");
		mutex_unlock(&rdev->ib_pool.mutex);
		radeon_fence_unref(&fence);
		return -EBUSY;
	}
	rdev->ib_pool.head_id = (nib->idx + 1) & (RADEON_IB_POOL_SIZE - 1);
	nib->free = false;
	if (nib->fence) {
		mutex_unlock(&rdev->ib_pool.mutex);
		r = radeon_fence_wait(nib->fence, false);
		if (r) {
			dev_err(rdev->dev, "error waiting for fence of IB(%u:0x%016lX:%u)\n",
				nib->idx, (unsigned long)nib->gpu_addr, nib->length_dw);
			mutex_lock(&rdev->ib_pool.mutex);
			nib->free = true;
			mutex_unlock(&rdev->ib_pool.mutex);
			radeon_fence_unref(&fence);
			return r;
		}
		mutex_lock(&rdev->ib_pool.mutex);
	}
	radeon_fence_unref(&nib->fence);
	nib->fence = fence;
	nib->length_dw = 0;
	mutex_unlock(&rdev->ib_pool.mutex);
	*ib = nib;
	return 0;
}

void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
{
	struct radeon_ib *tmp = *ib;

	*ib = NULL;
	if (tmp == NULL) {
		return;
	}
	if (!tmp->fence->emited)
		radeon_fence_unref(&tmp->fence);
	mutex_lock(&rdev->ib_pool.mutex);
	tmp->free = true;
	mutex_unlock(&rdev->ib_pool.mutex);
}
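/*
 * Typical IB life cycle as seen by a caller such as the command
 * submission ioctl (illustrative sketch, error handling abbreviated):
 *
 *	struct radeon_ib *ib;
 *	int r;
 *
 *	r = radeon_ib_get(rdev, &ib);
 *	if (r)
 *		return r;
 *	ib->ptr[ib->length_dw++] = packet;
 *	r = radeon_ib_schedule(rdev, ib);
 *	radeon_ib_free(rdev, &ib);
 *
 * radeon_ib_free() only unrefs the fence if it was never emitted;
 * otherwise the pool slot stays guarded by its fence until
 * radeon_ib_get() waits on it before reuse.
 */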
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
{
	int r = 0;

	if (!ib->length_dw || !rdev->cp.ready) {
		/* TODO: nothing in the IB; we should report this. */
		DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
		return -EINVAL;
	}

	/* 64 dwords should be enough for fence too */
	r = radeon_ring_lock(rdev, 64);
	if (r) {
		DRM_ERROR("radeon: scheduling IB failed (%d).\n", r);
		return r;
	}
	radeon_ring_ib_execute(rdev, ib);
	radeon_fence_emit(rdev, ib->fence);
	mutex_lock(&rdev->ib_pool.mutex);
	/* once scheduled the IB is considered free and protected by the fence */
	ib->free = true;
	mutex_unlock(&rdev->ib_pool.mutex);
	radeon_ring_unlock_commit(rdev);
	return 0;
}

int radeon_ib_pool_init(struct radeon_device *rdev)
{
	void *ptr;
	uint64_t gpu_addr;
	int i;
	int r = 0;

	if (rdev->ib_pool.robj)
		return 0;
	INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib);
	/* Allocate 1M object buffer */
	r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
			     true, RADEON_GEM_DOMAIN_GTT,
			     &rdev->ib_pool.robj);
	if (r) {
		DRM_ERROR("radeon: failed to create ib pool (%d).\n", r);
		return r;
	}
	r = radeon_bo_reserve(rdev->ib_pool.robj, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->ib_pool.robj);
		DRM_ERROR("radeon: failed to pin ib pool (%d).\n", r);
		return r;
	}
	r = radeon_bo_kmap(rdev->ib_pool.robj, &ptr);
	radeon_bo_unreserve(rdev->ib_pool.robj);
	if (r) {
		DRM_ERROR("radeon: failed to map ib pool (%d).\n", r);
		return r;
	}
	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		unsigned offset;

		offset = i * 64 * 1024;
		rdev->ib_pool.ibs[i].gpu_addr = gpu_addr + offset;
		rdev->ib_pool.ibs[i].ptr = ptr + offset;
		rdev->ib_pool.ibs[i].idx = i;
		rdev->ib_pool.ibs[i].length_dw = 0;
		rdev->ib_pool.ibs[i].free = true;
	}
	rdev->ib_pool.head_id = 0;
	rdev->ib_pool.ready = true;
	DRM_INFO("radeon: ib pool ready.\n");
	if (radeon_debugfs_ib_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for IB!\n");
	}
	return r;
}

void radeon_ib_pool_fini(struct radeon_device *rdev)
{
	int r;
	struct radeon_bo *robj;

	if (!rdev->ib_pool.ready) {
		return;
	}
	mutex_lock(&rdev->ib_pool.mutex);
	radeon_ib_bogus_cleanup(rdev);
	robj = rdev->ib_pool.robj;
	rdev->ib_pool.robj = NULL;
	mutex_unlock(&rdev->ib_pool.mutex);

	if (robj) {
		r = radeon_bo_reserve(robj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(robj);
			radeon_bo_unpin(robj);
			radeon_bo_unreserve(robj);
		}
		radeon_bo_unref(&robj);
	}
}
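/*
 * Pool layout recap: radeon_ib_pool_init() carves one pinned GTT
 * buffer object into RADEON_IB_POOL_SIZE fixed 64KB slots, so slot i
 * lives at byte offset i * 64 * 1024 in both the CPU mapping (ptr)
 * and the GPU address space (gpu_addr), and no per-IB allocation is
 * ever needed. With a pool size of 16 that is 16 * 64KB = 1MB, which
 * is what the "Allocate 1M object buffer" comment above refers to.
 */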
/*
 * Ring.
 */
void radeon_ring_free_size(struct radeon_device *rdev)
{
	if (rdev->family >= CHIP_R600)
		rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
	else
		rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
	/* This works because ring_size is a power of 2 */
	rdev->cp.ring_free_dw = (rdev->cp.rptr + (rdev->cp.ring_size / 4));
	rdev->cp.ring_free_dw -= rdev->cp.wptr;
	rdev->cp.ring_free_dw &= rdev->cp.ptr_mask;
	if (!rdev->cp.ring_free_dw) {
		rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;
	}
}

int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw)
{
	int r;

	/* Align requested size with padding so unlock_commit can
	 * pad safely */
	ndw = (ndw + rdev->cp.align_mask) & ~rdev->cp.align_mask;
	while (ndw > (rdev->cp.ring_free_dw - 1)) {
		radeon_ring_free_size(rdev);
		if (ndw < rdev->cp.ring_free_dw) {
			break;
		}
		r = radeon_fence_wait_next(rdev);
		if (r)
			return r;
	}
	rdev->cp.count_dw = ndw;
	rdev->cp.wptr_old = rdev->cp.wptr;
	return 0;
}

int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw)
{
	int r;

	mutex_lock(&rdev->cp.mutex);
	r = radeon_ring_alloc(rdev, ndw);
	if (r) {
		mutex_unlock(&rdev->cp.mutex);
		return r;
	}
	return 0;
}

void radeon_ring_commit(struct radeon_device *rdev)
{
	unsigned count_dw_pad;
	unsigned i;

	/* We pad to match fetch size */
	count_dw_pad = (rdev->cp.align_mask + 1) -
		       (rdev->cp.wptr & rdev->cp.align_mask);
	for (i = 0; i < count_dw_pad; i++) {
		/* (2 << 30) is a type-2 CP packet, i.e. harmless filler */
		radeon_ring_write(rdev, 2 << 30);
	}
	DRM_MEMORYBARRIER();
	radeon_cp_commit(rdev);
}

void radeon_ring_unlock_commit(struct radeon_device *rdev)
{
	radeon_ring_commit(rdev);
	mutex_unlock(&rdev->cp.mutex);
}

void radeon_ring_unlock_undo(struct radeon_device *rdev)
{
	rdev->cp.wptr = rdev->cp.wptr_old;
	mutex_unlock(&rdev->cp.mutex);
}

int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
{
	int r;

	rdev->cp.ring_size = ring_size;
	/* Allocate ring buffer */
	if (rdev->cp.ring_obj == NULL) {
		r = radeon_bo_create(rdev, NULL, rdev->cp.ring_size, true,
				     RADEON_GEM_DOMAIN_GTT,
				     &rdev->cp.ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring create failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->cp.ring_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(rdev->cp.ring_obj, RADEON_GEM_DOMAIN_GTT,
				  &rdev->cp.gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->cp.ring_obj);
			dev_err(rdev->dev, "(%d) ring pin failed\n", r);
			return r;
		}
		r = radeon_bo_kmap(rdev->cp.ring_obj,
				   (void **)&rdev->cp.ring);
		radeon_bo_unreserve(rdev->cp.ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring map failed\n", r);
			return r;
		}
	}
	rdev->cp.ptr_mask = (rdev->cp.ring_size / 4) - 1;
	rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;
	return 0;
}

void radeon_ring_fini(struct radeon_device *rdev)
{
	int r;
	struct radeon_bo *ring_obj;

	mutex_lock(&rdev->cp.mutex);
	ring_obj = rdev->cp.ring_obj;
	rdev->cp.ring = NULL;
	rdev->cp.ring_obj = NULL;
	mutex_unlock(&rdev->cp.mutex);

	if (ring_obj) {
		r = radeon_bo_reserve(ring_obj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(ring_obj);
			radeon_bo_unpin(ring_obj);
			radeon_bo_unreserve(ring_obj);
		}
		radeon_bo_unref(&ring_obj);
	}
}
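/*
 * Worked example of the free-space math in radeon_ring_free_size()
 * (illustrative numbers): a 64KB ring holds 16384 dwords, so ptr_mask
 * is 16383. With rptr = 100 and wptr = 16380, the free count is
 * (100 + 16384 - 16380) & 16383 = 104 dwords. A result of 0 means
 * rptr == wptr, i.e. an empty ring, so it is reset to the full
 * ring_size / 4; this is unambiguous because radeon_ring_alloc()
 * keeps at least one dword of slack (the "ndw > ring_free_dw - 1"
 * test), so the write pointer never catches up to the read pointer.
 */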
/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct radeon_ib *ib = node->info_ent->data;
	unsigned i;

	if (ib == NULL) {
		return 0;
	}
	seq_printf(m, "IB %04u\n", ib->idx);
	seq_printf(m, "IB fence %p\n", ib->fence);
	seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
	for (i = 0; i < ib->length_dw; i++) {
		seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
	}
	return 0;
}

static int radeon_debugfs_ib_bogus_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct radeon_device *rdev = node->info_ent->data;
	struct radeon_ib *ib;
	unsigned i;

	mutex_lock(&rdev->ib_pool.mutex);
	if (list_empty(&rdev->ib_pool.bogus_ib)) {
		mutex_unlock(&rdev->ib_pool.mutex);
		seq_printf(m, "no bogus IB recorded\n");
		return 0;
	}
	ib = list_first_entry(&rdev->ib_pool.bogus_ib, struct radeon_ib, list);
	list_del_init(&ib->list);
	mutex_unlock(&rdev->ib_pool.mutex);
	seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
	for (i = 0; i < ib->length_dw; i++) {
		seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
	}
	vfree(ib->ptr);
	kfree(ib);
	return 0;
}

static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];

static struct drm_info_list radeon_debugfs_ib_bogus_info_list[] = {
	{"radeon_ib_bogus", radeon_debugfs_ib_bogus_info, 0, NULL},
};
#endif

int radeon_debugfs_ib_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;
	int r;

	radeon_debugfs_ib_bogus_info_list[0].data = rdev;
	r = radeon_debugfs_add_files(rdev, radeon_debugfs_ib_bogus_info_list, 1);
	if (r)
		return r;
	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
		radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
		radeon_debugfs_ib_list[i].show = &radeon_debugfs_ib_info;
		radeon_debugfs_ib_list[i].driver_features = 0;
		radeon_debugfs_ib_list[i].data = &rdev->ib_pool.ibs[i];
	}
	return radeon_debugfs_add_files(rdev, radeon_debugfs_ib_list,
					RADEON_IB_POOL_SIZE);
#else
	return 0;
#endif
}
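/*
 * With CONFIG_DEBUG_FS enabled this registers one "radeon_ib_NNNN"
 * file per pool slot plus "radeon_ib_bogus". Reading a slot file
 * prints the fields exactly as formatted above, e.g. (values elided):
 *
 *	IB 0003
 *	IB fence ffff...
 *	IB size 00002 dwords
 *	[00000]=0x...
 *	[00001]=0x...
 *
 * Note that reading "radeon_ib_bogus" is destructive: it pops and
 * frees the oldest recorded IB from the list.
 */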