radeon_object.c revision 280183
/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/drm2/radeon/radeon_object.c 280183 2015-03-17 18:50:33Z dumbbell $");

#include <dev/drm2/drmP.h>
#include <dev/drm2/radeon/radeon_drm.h>
#include "radeon.h"
#ifdef FREEBSD_WIP
#include "radeon_trace.h"
#endif /* FREEBSD_WIP */


#ifdef FREEBSD_WIP /* FreeBSD: to please GCC 4.2. */
int radeon_ttm_init(struct radeon_device *rdev);
void radeon_ttm_fini(struct radeon_device *rdev);
#endif
static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);

/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * function are calling it.
 */

/*
 * Tear down every per-VM mapping (radeon_bo_va) still attached to this BO.
 * Called from the TTM destroy callback, so the BO is going away for good.
 */
static void radeon_bo_clear_va(struct radeon_bo *bo)
{
        struct radeon_bo_va *bo_va, *tmp;

        list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) {
                /* remove from all vm address space */
                radeon_vm_bo_rmv(bo->rdev, bo_va);
        }
}

/*
 * Final TTM destroy callback: unlink the BO from the device's GEM object
 * list, release its surface register and VM mappings, release the GEM base
 * object and free the wrapper allocated in radeon_bo_create().
 */
static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
        struct radeon_bo *bo;

        bo = container_of(tbo, struct radeon_bo, tbo);
        sx_xlock(&bo->rdev->gem.mutex);
        list_del_init(&bo->list);
        sx_xunlock(&bo->rdev->gem.mutex);
        radeon_bo_clear_surface_reg(bo);
        radeon_bo_clear_va(bo);
        drm_gem_object_release(&bo->gem_base);
        free(bo, DRM_MEM_DRIVER);
}

/*
 * A TTM buffer object belongs to this driver iff its destroy callback is
 * radeon_ttm_bo_destroy (the one installed by radeon_bo_create()).
 */
bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
{
        if (bo->destroy == &radeon_ttm_bo_destroy)
                return true;
        return false;
}

/*
 * Translate a RADEON_GEM_DOMAIN_* mask into the TTM placement list stored
 * in rbo->placement/rbo->placements.  VRAM is write-combined/uncached; GTT
 * and CPU (system) placements are write-combined on AGP, cached otherwise.
 * An empty mask falls back to a system placement with any caching attribute.
 */
void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
        u32 c = 0;

        rbo->placement.fpfn = 0;
        rbo->placement.lpfn = 0;
        rbo->placement.placement = rbo->placements;
        rbo->placement.busy_placement = rbo->placements;
        if (domain & RADEON_GEM_DOMAIN_VRAM)
                rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
                                        TTM_PL_FLAG_VRAM;
        if (domain & RADEON_GEM_DOMAIN_GTT) {
                if (rbo->rdev->flags & RADEON_IS_AGP) {
                        rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT;
                } else {
                        rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
                }
        }
        if (domain & RADEON_GEM_DOMAIN_CPU) {
                if (rbo->rdev->flags & RADEON_IS_AGP) {
                        rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM;
                } else {
                        rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
                }
        }
        if (!c)
                rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
        rbo->placement.num_placement = c;
        rbo->placement.num_busy_placement = c;
}

/*
 * Allocate and initialize a radeon BO of 'size' bytes (rounded up to whole
 * pages) in the given domain.  'kernel' selects an uninterruptible
 * kernel-type TTM BO; a non-NULL 'sg' selects a scatter/gather (prime) BO.
 * On success *bo_ptr receives the new BO; on failure it is left NULL.
 *
 * NOTE(review): on ttm_bo_init() failure the wrapper is not freed here —
 * presumably TTM invokes the destroy callback (radeon_ttm_bo_destroy) for
 * us in that path; confirm against the ttm_bo_init() contract.
 */
int radeon_bo_create(struct radeon_device *rdev,
                     unsigned long size, int byte_align, bool kernel, u32 domain,
                     struct sg_table *sg, struct radeon_bo **bo_ptr)
{
        struct radeon_bo *bo;
        enum ttm_bo_type type;
        unsigned long page_align = roundup2(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
        size_t acc_size;
        int r;

        size = roundup2(size, PAGE_SIZE);

        if (kernel) {
                type = ttm_bo_type_kernel;
        } else if (sg) {
                type = ttm_bo_type_sg;
        } else {
                type = ttm_bo_type_device;
        }
        *bo_ptr = NULL;

        acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
                                       sizeof(struct radeon_bo));

        bo = malloc(sizeof(struct radeon_bo),
                    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
        if (bo == NULL)
                return -ENOMEM;
        r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size);
        if (unlikely(r)) {
                free(bo, DRM_MEM_DRIVER);
                return r;
        }
        bo->rdev = rdev;
        bo->gem_base.driver_private = NULL;
        bo->surface_reg = -1;
        INIT_LIST_HEAD(&bo->list);
        INIT_LIST_HEAD(&bo->va);
        radeon_ttm_placement_from_domain(bo, domain);
        /* Kernel allocation are uninterruptible */
        sx_slock(&rdev->pm.mclk_lock);
        r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
                        &bo->placement, page_align, !kernel, NULL,
                        acc_size, sg, &radeon_ttm_bo_destroy);
        sx_sunlock(&rdev->pm.mclk_lock);
        if (unlikely(r != 0)) {
                return r;
        }
        *bo_ptr = bo;

#ifdef FREEBSD_WIP
        trace_radeon_bo_create(bo);
#endif /* FREEBSD_WIP */

        return 0;
}

/*
 * Map the BO into kernel address space.  The mapping is cached in
 * bo->kmap/bo->kptr, so repeated calls are cheap; *ptr (if non-NULL)
 * receives the kernel virtual address.
 */
int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
        bool is_iomem;
        int r;

        if (bo->kptr) {
                /* already mapped — just hand back the cached pointer */
                if (ptr) {
                        *ptr = bo->kptr;
                }
                return 0;
        }
        r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
        if (r) {
                return r;
        }
        bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
        if (ptr) {
                *ptr = bo->kptr;
        }
        radeon_bo_check_tiling(bo, 0, 0);
        return 0;
}

/*
 * Undo radeon_bo_kmap().  No-op if the BO is not currently mapped.
 */
void radeon_bo_kunmap(struct radeon_bo *bo)
{
        if (bo->kptr == NULL)
                return;
        bo->kptr = NULL;
        radeon_bo_check_tiling(bo, 0, 0);
        ttm_bo_kunmap(&bo->kmap);
}

/*
 * Drop one reference on *bo.  ttm_bo_unref() clears the tbo pointer once
 * the reference has been dropped, in which case the caller's pointer is
 * NULLed as well.  The pm.mclk_lock is held shared around the unref, as in
 * radeon_bo_create().
 */
void radeon_bo_unref(struct radeon_bo **bo)
{
        struct ttm_buffer_object *tbo;
        struct radeon_device *rdev;

        if ((*bo) == NULL)
                return;
        rdev = (*bo)->rdev;
        tbo = &((*bo)->tbo);
        sx_slock(&rdev->pm.mclk_lock);
        ttm_bo_unref(&tbo);
        sx_sunlock(&rdev->pm.mclk_lock);
        if (tbo == NULL)
                *bo = NULL;
}

/*
 * Pin the BO into 'domain', optionally restricted to GPU offsets below
 * 'max_offset' (0 means unrestricted).  Pinning is refcounted: an already
 * pinned BO just gets its count bumped (with a sanity check that it still
 * satisfies max_offset).  On success *gpu_addr (if non-NULL) receives the
 * BO's GPU offset.  Caller must hold the BO reservation.
 */
int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
                             u64 *gpu_addr)
{
        int r, i;

        if (bo->pin_count) {
                bo->pin_count++;
                if (gpu_addr)
                        *gpu_addr = radeon_bo_gpu_offset(bo);

                if (max_offset != 0) {
                        u64 domain_start;

                        if (domain == RADEON_GEM_DOMAIN_VRAM)
                                domain_start = bo->rdev->mc.vram_start;
                        else
                                domain_start = bo->rdev->mc.gtt_start;
                        /* already-pinned BO outside the requested range: log only */
                        if (max_offset < (radeon_bo_gpu_offset(bo) - domain_start)) {
                                DRM_ERROR("radeon_bo_pin_restricted: "
                                    "max_offset(%ju) < "
                                    "(radeon_bo_gpu_offset(%ju) - "
                                    "domain_start(%ju)",
                                    (uintmax_t)max_offset, (uintmax_t)radeon_bo_gpu_offset(bo),
                                    (uintmax_t)domain_start);
                        }
                }

                return 0;
        }
        radeon_ttm_placement_from_domain(bo, domain);
        if (domain == RADEON_GEM_DOMAIN_VRAM) {
                /* force to pin into visible video ram */
                bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
        }
        if (max_offset) {
                u64 lpfn = max_offset >> PAGE_SHIFT;

                if (!bo->placement.lpfn)
                        bo->placement.lpfn = bo->rdev->mc.gtt_size >> PAGE_SHIFT;

                if (lpfn < bo->placement.lpfn)
                        bo->placement.lpfn = lpfn;
        }
        /* NO_EVICT on every placement is what actually pins the BO in TTM */
        for (i = 0; i < bo->placement.num_placement; i++)
                bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
        r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
        if (likely(r == 0)) {
                bo->pin_count = 1;
                if (gpu_addr != NULL)
                        *gpu_addr = radeon_bo_gpu_offset(bo);
        }
        if (unlikely(r != 0))
                dev_err(bo->rdev->dev, "%p pin failed\n", bo);
        return r;
}

/*
 * Pin without an offset restriction.
 */
int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
        return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
}

/*
 * Drop one pin reference; when the count reaches zero, clear NO_EVICT from
 * all placements and re-validate so TTM may evict the BO again.
 */
int radeon_bo_unpin(struct radeon_bo *bo)
{
        int r, i;

        if (!bo->pin_count) {
                dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
                return 0;
        }
        bo->pin_count--;
        if (bo->pin_count)
                return 0;
        for (i = 0; i < bo->placement.num_placement; i++)
                bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
        r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
        if (unlikely(r != 0))
                dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
        return r;
}

/*
 * Evict everything from VRAM (suspend/teardown path).  The IGP short-cut
 * below is deliberately disabled with `0 &&` pending proper pm ops support.
 */
int radeon_bo_evict_vram(struct radeon_device *rdev)
{
        /* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */
        if (0 && (rdev->flags & RADEON_IS_IGP)) {
                if (rdev->mc.igp_sideport_enabled == false)
                        /* Useless to evict on IGP chips */
                        return 0;
        }
        return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}

/*
 * Driver-teardown safety net: forcibly release any GEM objects userspace
 * left behind.  Logs each leaked BO, unlinks it from the GEM list and drops
 * the GEM reference (which should destroy the underlying TTM BO).
 */
void radeon_bo_force_delete(struct radeon_device *rdev)
{
        struct radeon_bo *bo, *n;

        if (list_empty(&rdev->gem.objects)) {
                return;
        }
        dev_err(rdev->dev, "Userspace still has active objects !\n");
        list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
                DRM_LOCK(rdev->ddev);
                /* the refcount is printed via a raw cast of the refcount field */
                dev_err(rdev->dev, "%p %p %lu %lu force free\n",
                        &bo->gem_base, bo, (unsigned long)bo->gem_base.size,
                        *((unsigned long *)&bo->gem_base.refcount));
                sx_xlock(&bo->rdev->gem.mutex);
                list_del_init(&bo->list);
                sx_xunlock(&bo->rdev->gem.mutex);
                /* this should unref the ttm bo */
                drm_gem_object_unreference(&bo->gem_base);
                DRM_UNLOCK(rdev->ddev);
        }
}

/*
 * One-time BO subsystem init: add a write-combining MTRR covering the VRAM
 * aperture, log the detected memory configuration and bring up TTM.
 */
int radeon_bo_init(struct radeon_device *rdev)
{
        /* Add an MTRR for the VRAM */
        rdev->mc.vram_mtrr = drm_mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
                                          DRM_MTRR_WC);
        DRM_INFO("Detected VRAM RAM=%juM, BAR=%juM\n",
                 (uintmax_t)rdev->mc.mc_vram_size >> 20,
                 (uintmax_t)rdev->mc.aper_size >> 20);
        DRM_INFO("RAM width %dbits %cDR\n",
                 rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
        return radeon_ttm_init(rdev);
}

/*
 * Tear down the TTM side of the BO subsystem.
 */
void radeon_bo_fini(struct radeon_device *rdev)
{
        radeon_ttm_fini(rdev);
}

/*
 * Queue a BO for validation: write-domain BOs go to the head of the list,
 * read-only BOs to the tail.
 */
void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
                               struct list_head *head)
{
        if (lobj->wdomain) {
                list_add(&lobj->tv.head, head);
        } else {
                list_add_tail(&lobj->tv.head, head);
        }
}

/*
 * Reserve and validate every BO on 'head' (CS submission path).  Each
 * unpinned BO is validated into its write domain (or read domain if it has
 * no write domain); on failure a VRAM placement is retried with GTT added
 * as a fallback before giving up.  Records the resulting GPU offset and
 * tiling flags in each list entry.
 */
int radeon_bo_list_validate(struct list_head *head)
{
        struct radeon_bo_list *lobj;
        struct radeon_bo *bo;
        u32 domain;
        int r;

        r = ttm_eu_reserve_buffers(head);
        if (unlikely(r != 0)) {
                return r;
        }
        list_for_each_entry(lobj, head, tv.head) {
                bo = lobj->bo;
                if (!bo->pin_count) {
                        domain = lobj->wdomain ? lobj->wdomain : lobj->rdomain;

                retry:
                        radeon_ttm_placement_from_domain(bo, domain);
                        r = ttm_bo_validate(&bo->tbo, &bo->placement,
                                            true, false);
                        if (unlikely(r)) {
                                if (r != -ERESTARTSYS && domain == RADEON_GEM_DOMAIN_VRAM) {
                                        /* VRAM full — allow GTT as a fallback placement */
                                        domain |= RADEON_GEM_DOMAIN_GTT;
                                        goto retry;
                                }
                                return r;
                        }
                }
                lobj->gpu_offset = radeon_bo_gpu_offset(bo);
                lobj->tiling_flags = bo->tiling_flags;
        }
        return 0;
}

#ifdef FREEBSD_WIP
int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
                         struct vm_area_struct *vma)
{
        return ttm_fbdev_mmap(vma, &bo->tbo);
}
#endif /* FREEBSD_WIP */

/*
 * Assign a hardware surface register to a tiled BO (no-op for untiled
 * BOs).  Reuses the BO's existing register if it has one; otherwise takes
 * a free slot, or steals the slot of an unpinned BO when all slots are in
 * use.  Programs the register for the BO's current VRAM location.  Caller
 * must hold the BO reservation (asserted).
 */
int radeon_bo_get_surface_reg(struct radeon_bo *bo)
{
        struct radeon_device *rdev = bo->rdev;
        struct radeon_surface_reg *reg;
        struct radeon_bo *old_object;
        int steal;
        int i;

        KASSERT(radeon_bo_is_reserved(bo),
            ("radeon_bo_get_surface_reg: radeon_bo is not reserved"));

        if (!bo->tiling_flags)
                return 0;

        if (bo->surface_reg >= 0) {
                /* already have a register — just reprogram it below */
                reg = &rdev->surface_regs[bo->surface_reg];
                i = bo->surface_reg;
                goto out;
        }

        steal = -1;
        for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {

                reg = &rdev->surface_regs[i];
                if (!reg->bo)
                        break;

                old_object = reg->bo;
                if (old_object->pin_count == 0)
                        steal = i;
        }

        /* if we are all out */
        if (i == RADEON_GEM_MAX_SURFACES) {
                if (steal == -1)
                        return -ENOMEM;
                /* find someone with a surface reg and nuke their BO */
                reg = &rdev->surface_regs[steal];
                old_object = reg->bo;
                /* blow away the mapping */
                DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
                ttm_bo_unmap_virtual(&old_object->tbo);
                old_object->surface_reg = -1;
                i = steal;
        }

        bo->surface_reg = i;
        reg->bo = bo;

out:
        radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
                               bo->tbo.mem.start << PAGE_SHIFT,
                               bo->tbo.num_pages << PAGE_SHIFT);
        return 0;
}

/*
 * Release the BO's surface register, if any, and clear the hardware slot.
 */
static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
{
        struct radeon_device *rdev = bo->rdev;
        struct radeon_surface_reg *reg;

        if (bo->surface_reg == -1)
                return;

        reg = &rdev->surface_regs[bo->surface_reg];
        radeon_clear_surface_reg(rdev, bo->surface_reg);

        reg->bo = NULL;
        bo->surface_reg = -1;
}

/*
 * Validate and store userspace tiling parameters.  On Evergreen+
 * (CHIP_CEDAR and newer) the bank width/height and macro-tile aspect must
 * be 0/1/2/4/8 and the (stencil) tile split fields must be <= 6, otherwise
 * -EINVAL.  Takes the BO reservation around the update.
 */
int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
                               uint32_t tiling_flags, uint32_t pitch)
{
        struct radeon_device *rdev = bo->rdev;
        int r;

        if (rdev->family >= CHIP_CEDAR) {
                unsigned bankw, bankh, mtaspect, tilesplit, stilesplit;

                bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
                bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
                mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
                tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
                stilesplit = (tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
                switch (bankw) {
                case 0:
                case 1:
                case 2:
                case 4:
                case 8:
                        break;
                default:
                        return -EINVAL;
                }
                switch (bankh) {
                case 0:
                case 1:
                case 2:
                case 4:
                case 8:
                        break;
                default:
                        return -EINVAL;
                }
                switch (mtaspect) {
                case 0:
                case 1:
                case 2:
                case 4:
                case 8:
                        break;
                default:
                        return -EINVAL;
                }
                if (tilesplit > 6) {
                        return -EINVAL;
                }
                if (stilesplit > 6) {
                        return -EINVAL;
                }
        }
        r = radeon_bo_reserve(bo, false);
        if (unlikely(r != 0))
                return r;
        bo->tiling_flags = tiling_flags;
        bo->pitch = pitch;
        radeon_bo_unreserve(bo);
        return 0;
}

/*
 * Read back the BO's tiling flags and pitch; either out pointer may be
 * NULL.  Caller must hold the BO reservation (asserted).
 */
void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
                                uint32_t *tiling_flags,
                                uint32_t *pitch)
{
        KASSERT(radeon_bo_is_reserved(bo),
            ("radeon_bo_get_tiling_flags: radeon_bo is not reserved"));
        if (tiling_flags)
                *tiling_flags = bo->tiling_flags;
        if (pitch)
                *pitch = bo->pitch;
}

/*
 * Keep a tiled-surface BO's surface register consistent with its current
 * placement.  'force_drop' unconditionally releases the register (and
 * relaxes the reservation assertion); 'has_moved' indicates the BO just
 * changed location.  A BO outside VRAM cannot hold a surface register; a
 * BO in VRAM (re)acquires one when needed.
 */
int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
                           bool force_drop)
{
        KASSERT((radeon_bo_is_reserved(bo) || force_drop),
            ("radeon_bo_check_tiling: radeon_bo is not reserved && !force_drop"));

        if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
                return 0;

        if (force_drop) {
                radeon_bo_clear_surface_reg(bo);
                return 0;
        }

        if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
                if (!has_moved)
                        return 0;

                if (bo->surface_reg >= 0)
                        radeon_bo_clear_surface_reg(bo);
                return 0;
        }

        if ((bo->surface_reg >= 0) && !has_moved)
                return 0;

        return radeon_bo_get_surface_reg(bo);
}

/*
 * TTM move notification: drop the surface register and invalidate VM
 * mappings for a BO that is being moved.  Ignores foreign TTM BOs.
 */
void radeon_bo_move_notify(struct ttm_buffer_object *bo,
                           struct ttm_mem_reg *mem)
{
        struct radeon_bo *rbo;
        if (!radeon_ttm_bo_is_radeon_bo(bo))
                return;
        rbo = container_of(bo, struct radeon_bo, tbo);
        radeon_bo_check_tiling(rbo, 0, 1);
        radeon_vm_bo_invalidate(rbo->rdev, rbo);
}

/*
 * TTM fault notification: if the faulting BO lives in VRAM beyond the
 * CPU-visible aperture, re-validate it into visible VRAM so the CPU
 * mapping can succeed.  Returns -EINVAL if it still lands out of range.
 */
int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
        struct radeon_device *rdev;
        struct radeon_bo *rbo;
        unsigned long offset, size;
        int r;

        if (!radeon_ttm_bo_is_radeon_bo(bo))
                return 0;
        rbo = container_of(bo, struct radeon_bo, tbo);
        radeon_bo_check_tiling(rbo, 0, 0);
        rdev = rbo->rdev;
        if (bo->mem.mem_type == TTM_PL_VRAM) {
                size = bo->mem.num_pages << PAGE_SHIFT;
                offset = bo->mem.start << PAGE_SHIFT;
                if ((offset + size) > rdev->mc.visible_vram_size) {
                        /* hurrah the memory is not visible ! */
                        radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
                        rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
                        r = ttm_bo_validate(bo, &rbo->placement, false, false);
                        if (unlikely(r != 0))
                                return r;
                        offset = bo->mem.start << PAGE_SHIFT;
                        /* this should not happen */
                        if ((offset + size) > rdev->mc.visible_vram_size)
                                return -EINVAL;
                }
        }
        return 0;
}

/*
 * Wait for the BO to become idle (or just poll when 'no_wait').  Optionally
 * reports the BO's current memory type through *mem_type.  Reserves the BO
 * and samples sync_obj under the bdev fence lock.
 */
int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
{
        int r;

        r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
        if (unlikely(r != 0))
                return r;
        mtx_lock(&bo->tbo.bdev->fence_lock);
        if (mem_type)
                *mem_type = bo->tbo.mem.mem_type;
        if (bo->tbo.sync_obj)
                r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
        mtx_unlock(&bo->tbo.bdev->fence_lock);
        ttm_bo_unreserve(&bo->tbo);
        return r;
}


/**
 * radeon_bo_reserve - reserve bo
 * @bo:         bo structure
 * @no_intr:    don't return -ERESTARTSYS on pending signal
 *
 * Returns:
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 */
int radeon_bo_reserve(struct radeon_bo *bo, bool no_intr)
{
        int r;

        r = ttm_bo_reserve(&bo->tbo, !no_intr, false, false, 0);
        if (unlikely(r != 0)) {
                if (r != -ERESTARTSYS)
                        dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
                return r;
        }
        return 0;
}