/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
24254885Sdumbbell * 25254885Sdumbbell */ 26254885Sdumbbell/* 27254885Sdumbbell * Authors: 28254885Sdumbbell * Jerome Glisse <glisse@freedesktop.org> 29254885Sdumbbell * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com> 30254885Sdumbbell * Dave Airlie 31254885Sdumbbell */ 32254885Sdumbbell 33254885Sdumbbell#include <sys/cdefs.h> 34254885Sdumbbell__FBSDID("$FreeBSD$"); 35254885Sdumbbell 36254885Sdumbbell#include <dev/drm2/drmP.h> 37254885Sdumbbell#include <dev/drm2/radeon/radeon_drm.h> 38254885Sdumbbell#include "radeon.h" 39280183Sdumbbell#ifdef FREEBSD_WIP 40254885Sdumbbell#include "radeon_trace.h" 41280183Sdumbbell#endif /* FREEBSD_WIP */ 42254885Sdumbbell 43254885Sdumbbell 44280183Sdumbbell#ifdef FREEBSD_WIP /* FreeBSD: to please GCC 4.2. */ 45280183Sdumbbellint radeon_ttm_init(struct radeon_device *rdev); 46280183Sdumbbellvoid radeon_ttm_fini(struct radeon_device *rdev); 47280183Sdumbbell#endif 48254885Sdumbbellstatic void radeon_bo_clear_surface_reg(struct radeon_bo *bo); 49254885Sdumbbell 50254885Sdumbbell/* 51254885Sdumbbell * To exclude mutual BO access we rely on bo_reserve exclusion, as all 52254885Sdumbbell * function are calling it. 
53254885Sdumbbell */ 54254885Sdumbbell 55254885Sdumbbellstatic void radeon_bo_clear_va(struct radeon_bo *bo) 56254885Sdumbbell{ 57254885Sdumbbell struct radeon_bo_va *bo_va, *tmp; 58254885Sdumbbell 59254885Sdumbbell list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) { 60254885Sdumbbell /* remove from all vm address space */ 61254885Sdumbbell radeon_vm_bo_rmv(bo->rdev, bo_va); 62254885Sdumbbell } 63254885Sdumbbell} 64254885Sdumbbell 65254885Sdumbbellstatic void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo) 66254885Sdumbbell{ 67254885Sdumbbell struct radeon_bo *bo; 68254885Sdumbbell 69254885Sdumbbell bo = container_of(tbo, struct radeon_bo, tbo); 70254885Sdumbbell sx_xlock(&bo->rdev->gem.mutex); 71254885Sdumbbell list_del_init(&bo->list); 72254885Sdumbbell sx_xunlock(&bo->rdev->gem.mutex); 73254885Sdumbbell radeon_bo_clear_surface_reg(bo); 74254885Sdumbbell radeon_bo_clear_va(bo); 75254885Sdumbbell drm_gem_object_release(&bo->gem_base); 76254885Sdumbbell free(bo, DRM_MEM_DRIVER); 77254885Sdumbbell} 78254885Sdumbbell 79254885Sdumbbellbool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo) 80254885Sdumbbell{ 81254885Sdumbbell if (bo->destroy == &radeon_ttm_bo_destroy) 82254885Sdumbbell return true; 83254885Sdumbbell return false; 84254885Sdumbbell} 85254885Sdumbbell 86254885Sdumbbellvoid radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain) 87254885Sdumbbell{ 88254885Sdumbbell u32 c = 0; 89254885Sdumbbell 90254885Sdumbbell rbo->placement.fpfn = 0; 91254885Sdumbbell rbo->placement.lpfn = 0; 92254885Sdumbbell rbo->placement.placement = rbo->placements; 93254885Sdumbbell rbo->placement.busy_placement = rbo->placements; 94254885Sdumbbell if (domain & RADEON_GEM_DOMAIN_VRAM) 95254885Sdumbbell rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | 96254885Sdumbbell TTM_PL_FLAG_VRAM; 97254885Sdumbbell if (domain & RADEON_GEM_DOMAIN_GTT) { 98254885Sdumbbell if (rbo->rdev->flags & RADEON_IS_AGP) { 99254885Sdumbbell rbo->placements[c++] = 
TTM_PL_FLAG_WC | TTM_PL_FLAG_TT; 100254885Sdumbbell } else { 101254885Sdumbbell rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT; 102254885Sdumbbell } 103254885Sdumbbell } 104254885Sdumbbell if (domain & RADEON_GEM_DOMAIN_CPU) { 105254885Sdumbbell if (rbo->rdev->flags & RADEON_IS_AGP) { 106254885Sdumbbell rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM; 107254885Sdumbbell } else { 108254885Sdumbbell rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM; 109254885Sdumbbell } 110254885Sdumbbell } 111254885Sdumbbell if (!c) 112254885Sdumbbell rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; 113254885Sdumbbell rbo->placement.num_placement = c; 114254885Sdumbbell rbo->placement.num_busy_placement = c; 115254885Sdumbbell} 116254885Sdumbbell 117254885Sdumbbellint radeon_bo_create(struct radeon_device *rdev, 118254885Sdumbbell unsigned long size, int byte_align, bool kernel, u32 domain, 119254885Sdumbbell struct sg_table *sg, struct radeon_bo **bo_ptr) 120254885Sdumbbell{ 121254885Sdumbbell struct radeon_bo *bo; 122254885Sdumbbell enum ttm_bo_type type; 123254885Sdumbbell unsigned long page_align = roundup2(byte_align, PAGE_SIZE) >> PAGE_SHIFT; 124254885Sdumbbell size_t acc_size; 125254885Sdumbbell int r; 126254885Sdumbbell 127254885Sdumbbell size = roundup2(size, PAGE_SIZE); 128254885Sdumbbell 129254885Sdumbbell if (kernel) { 130254885Sdumbbell type = ttm_bo_type_kernel; 131254885Sdumbbell } else if (sg) { 132254885Sdumbbell type = ttm_bo_type_sg; 133254885Sdumbbell } else { 134254885Sdumbbell type = ttm_bo_type_device; 135254885Sdumbbell } 136254885Sdumbbell *bo_ptr = NULL; 137254885Sdumbbell 138254885Sdumbbell acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size, 139254885Sdumbbell sizeof(struct radeon_bo)); 140254885Sdumbbell 141254885Sdumbbell bo = malloc(sizeof(struct radeon_bo), 142280183Sdumbbell DRM_MEM_DRIVER, M_NOWAIT | M_ZERO); 143254885Sdumbbell if (bo == NULL) 144254885Sdumbbell return -ENOMEM; 145254885Sdumbbell 
r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size); 146254885Sdumbbell if (unlikely(r)) { 147254885Sdumbbell free(bo, DRM_MEM_DRIVER); 148254885Sdumbbell return r; 149254885Sdumbbell } 150254885Sdumbbell bo->rdev = rdev; 151254885Sdumbbell bo->gem_base.driver_private = NULL; 152254885Sdumbbell bo->surface_reg = -1; 153254885Sdumbbell INIT_LIST_HEAD(&bo->list); 154254885Sdumbbell INIT_LIST_HEAD(&bo->va); 155254885Sdumbbell radeon_ttm_placement_from_domain(bo, domain); 156254885Sdumbbell /* Kernel allocation are uninterruptible */ 157254885Sdumbbell sx_slock(&rdev->pm.mclk_lock); 158254885Sdumbbell r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type, 159254885Sdumbbell &bo->placement, page_align, !kernel, NULL, 160254885Sdumbbell acc_size, sg, &radeon_ttm_bo_destroy); 161254885Sdumbbell sx_sunlock(&rdev->pm.mclk_lock); 162254885Sdumbbell if (unlikely(r != 0)) { 163254885Sdumbbell return r; 164254885Sdumbbell } 165254885Sdumbbell *bo_ptr = bo; 166254885Sdumbbell 167280183Sdumbbell#ifdef FREEBSD_WIP 168254885Sdumbbell trace_radeon_bo_create(bo); 169280183Sdumbbell#endif /* FREEBSD_WIP */ 170254885Sdumbbell 171254885Sdumbbell return 0; 172254885Sdumbbell} 173254885Sdumbbell 174254885Sdumbbellint radeon_bo_kmap(struct radeon_bo *bo, void **ptr) 175254885Sdumbbell{ 176254885Sdumbbell bool is_iomem; 177254885Sdumbbell int r; 178254885Sdumbbell 179254885Sdumbbell if (bo->kptr) { 180254885Sdumbbell if (ptr) { 181254885Sdumbbell *ptr = bo->kptr; 182254885Sdumbbell } 183254885Sdumbbell return 0; 184254885Sdumbbell } 185254885Sdumbbell r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap); 186254885Sdumbbell if (r) { 187254885Sdumbbell return r; 188254885Sdumbbell } 189254885Sdumbbell bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem); 190254885Sdumbbell if (ptr) { 191254885Sdumbbell *ptr = bo->kptr; 192254885Sdumbbell } 193254885Sdumbbell radeon_bo_check_tiling(bo, 0, 0); 194254885Sdumbbell return 0; 195254885Sdumbbell} 196254885Sdumbbell 
197254885Sdumbbellvoid radeon_bo_kunmap(struct radeon_bo *bo) 198254885Sdumbbell{ 199254885Sdumbbell if (bo->kptr == NULL) 200254885Sdumbbell return; 201254885Sdumbbell bo->kptr = NULL; 202254885Sdumbbell radeon_bo_check_tiling(bo, 0, 0); 203254885Sdumbbell ttm_bo_kunmap(&bo->kmap); 204254885Sdumbbell} 205254885Sdumbbell 206254885Sdumbbellvoid radeon_bo_unref(struct radeon_bo **bo) 207254885Sdumbbell{ 208254885Sdumbbell struct ttm_buffer_object *tbo; 209254885Sdumbbell struct radeon_device *rdev; 210254885Sdumbbell 211254885Sdumbbell if ((*bo) == NULL) 212254885Sdumbbell return; 213254885Sdumbbell rdev = (*bo)->rdev; 214254885Sdumbbell tbo = &((*bo)->tbo); 215254885Sdumbbell sx_slock(&rdev->pm.mclk_lock); 216254885Sdumbbell ttm_bo_unref(&tbo); 217254885Sdumbbell sx_sunlock(&rdev->pm.mclk_lock); 218254885Sdumbbell if (tbo == NULL) 219254885Sdumbbell *bo = NULL; 220254885Sdumbbell} 221254885Sdumbbell 222254885Sdumbbellint radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset, 223254885Sdumbbell u64 *gpu_addr) 224254885Sdumbbell{ 225254885Sdumbbell int r, i; 226254885Sdumbbell 227254885Sdumbbell if (bo->pin_count) { 228254885Sdumbbell bo->pin_count++; 229254885Sdumbbell if (gpu_addr) 230254885Sdumbbell *gpu_addr = radeon_bo_gpu_offset(bo); 231254885Sdumbbell 232254885Sdumbbell if (max_offset != 0) { 233254885Sdumbbell u64 domain_start; 234254885Sdumbbell 235254885Sdumbbell if (domain == RADEON_GEM_DOMAIN_VRAM) 236254885Sdumbbell domain_start = bo->rdev->mc.vram_start; 237254885Sdumbbell else 238254885Sdumbbell domain_start = bo->rdev->mc.gtt_start; 239254885Sdumbbell if (max_offset < (radeon_bo_gpu_offset(bo) - domain_start)) { 240254885Sdumbbell DRM_ERROR("radeon_bo_pin_restricted: " 241254885Sdumbbell "max_offset(%ju) < " 242254885Sdumbbell "(radeon_bo_gpu_offset(%ju) - " 243254885Sdumbbell "domain_start(%ju)", 244254885Sdumbbell (uintmax_t)max_offset, (uintmax_t)radeon_bo_gpu_offset(bo), 245254885Sdumbbell (uintmax_t)domain_start); 
246254885Sdumbbell } 247254885Sdumbbell } 248254885Sdumbbell 249254885Sdumbbell return 0; 250254885Sdumbbell } 251254885Sdumbbell radeon_ttm_placement_from_domain(bo, domain); 252254885Sdumbbell if (domain == RADEON_GEM_DOMAIN_VRAM) { 253254885Sdumbbell /* force to pin into visible video ram */ 254254885Sdumbbell bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT; 255254885Sdumbbell } 256254885Sdumbbell if (max_offset) { 257254885Sdumbbell u64 lpfn = max_offset >> PAGE_SHIFT; 258254885Sdumbbell 259254885Sdumbbell if (!bo->placement.lpfn) 260254885Sdumbbell bo->placement.lpfn = bo->rdev->mc.gtt_size >> PAGE_SHIFT; 261254885Sdumbbell 262254885Sdumbbell if (lpfn < bo->placement.lpfn) 263254885Sdumbbell bo->placement.lpfn = lpfn; 264254885Sdumbbell } 265254885Sdumbbell for (i = 0; i < bo->placement.num_placement; i++) 266254885Sdumbbell bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; 267254885Sdumbbell r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); 268254885Sdumbbell if (likely(r == 0)) { 269254885Sdumbbell bo->pin_count = 1; 270254885Sdumbbell if (gpu_addr != NULL) 271254885Sdumbbell *gpu_addr = radeon_bo_gpu_offset(bo); 272254885Sdumbbell } 273254885Sdumbbell if (unlikely(r != 0)) 274254885Sdumbbell dev_err(bo->rdev->dev, "%p pin failed\n", bo); 275254885Sdumbbell return r; 276254885Sdumbbell} 277254885Sdumbbell 278254885Sdumbbellint radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr) 279254885Sdumbbell{ 280254885Sdumbbell return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr); 281254885Sdumbbell} 282254885Sdumbbell 283254885Sdumbbellint radeon_bo_unpin(struct radeon_bo *bo) 284254885Sdumbbell{ 285254885Sdumbbell int r, i; 286254885Sdumbbell 287254885Sdumbbell if (!bo->pin_count) { 288254885Sdumbbell dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo); 289254885Sdumbbell return 0; 290254885Sdumbbell } 291254885Sdumbbell bo->pin_count--; 292254885Sdumbbell if (bo->pin_count) 293254885Sdumbbell return 0; 294254885Sdumbbell for 
(i = 0; i < bo->placement.num_placement; i++) 295254885Sdumbbell bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT; 296254885Sdumbbell r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); 297254885Sdumbbell if (unlikely(r != 0)) 298254885Sdumbbell dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo); 299254885Sdumbbell return r; 300254885Sdumbbell} 301254885Sdumbbell 302254885Sdumbbellint radeon_bo_evict_vram(struct radeon_device *rdev) 303254885Sdumbbell{ 304254885Sdumbbell /* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */ 305254885Sdumbbell if (0 && (rdev->flags & RADEON_IS_IGP)) { 306254885Sdumbbell if (rdev->mc.igp_sideport_enabled == false) 307254885Sdumbbell /* Useless to evict on IGP chips */ 308254885Sdumbbell return 0; 309254885Sdumbbell } 310254885Sdumbbell return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM); 311254885Sdumbbell} 312254885Sdumbbell 313254885Sdumbbellvoid radeon_bo_force_delete(struct radeon_device *rdev) 314254885Sdumbbell{ 315254885Sdumbbell struct radeon_bo *bo, *n; 316254885Sdumbbell 317254885Sdumbbell if (list_empty(&rdev->gem.objects)) { 318254885Sdumbbell return; 319254885Sdumbbell } 320254885Sdumbbell dev_err(rdev->dev, "Userspace still has active objects !\n"); 321254885Sdumbbell list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) { 322280183Sdumbbell DRM_LOCK(rdev->ddev); 323254885Sdumbbell dev_err(rdev->dev, "%p %p %lu %lu force free\n", 324254885Sdumbbell &bo->gem_base, bo, (unsigned long)bo->gem_base.size, 325254885Sdumbbell *((unsigned long *)&bo->gem_base.refcount)); 326254885Sdumbbell sx_xlock(&bo->rdev->gem.mutex); 327254885Sdumbbell list_del_init(&bo->list); 328254885Sdumbbell sx_xunlock(&bo->rdev->gem.mutex); 329254885Sdumbbell /* this should unref the ttm bo */ 330254885Sdumbbell drm_gem_object_unreference(&bo->gem_base); 331280183Sdumbbell DRM_UNLOCK(rdev->ddev); 332254885Sdumbbell } 333254885Sdumbbell} 334254885Sdumbbell 335254885Sdumbbellint radeon_bo_init(struct radeon_device 
*rdev) 336254885Sdumbbell{ 337254885Sdumbbell /* Add an MTRR for the VRAM */ 338254885Sdumbbell rdev->mc.vram_mtrr = drm_mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size, 339254885Sdumbbell DRM_MTRR_WC); 340254885Sdumbbell DRM_INFO("Detected VRAM RAM=%juM, BAR=%juM\n", 341254885Sdumbbell (uintmax_t)rdev->mc.mc_vram_size >> 20, 342254885Sdumbbell (uintmax_t)rdev->mc.aper_size >> 20); 343254885Sdumbbell DRM_INFO("RAM width %dbits %cDR\n", 344254885Sdumbbell rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S'); 345254885Sdumbbell return radeon_ttm_init(rdev); 346254885Sdumbbell} 347254885Sdumbbell 348254885Sdumbbellvoid radeon_bo_fini(struct radeon_device *rdev) 349254885Sdumbbell{ 350254885Sdumbbell radeon_ttm_fini(rdev); 351254885Sdumbbell} 352254885Sdumbbell 353254885Sdumbbellvoid radeon_bo_list_add_object(struct radeon_bo_list *lobj, 354254885Sdumbbell struct list_head *head) 355254885Sdumbbell{ 356254885Sdumbbell if (lobj->wdomain) { 357254885Sdumbbell list_add(&lobj->tv.head, head); 358254885Sdumbbell } else { 359254885Sdumbbell list_add_tail(&lobj->tv.head, head); 360254885Sdumbbell } 361254885Sdumbbell} 362254885Sdumbbell 363254885Sdumbbellint radeon_bo_list_validate(struct list_head *head) 364254885Sdumbbell{ 365254885Sdumbbell struct radeon_bo_list *lobj; 366254885Sdumbbell struct radeon_bo *bo; 367254885Sdumbbell u32 domain; 368254885Sdumbbell int r; 369254885Sdumbbell 370254885Sdumbbell r = ttm_eu_reserve_buffers(head); 371254885Sdumbbell if (unlikely(r != 0)) { 372254885Sdumbbell return r; 373254885Sdumbbell } 374254885Sdumbbell list_for_each_entry(lobj, head, tv.head) { 375254885Sdumbbell bo = lobj->bo; 376254885Sdumbbell if (!bo->pin_count) { 377254885Sdumbbell domain = lobj->wdomain ? 
lobj->wdomain : lobj->rdomain; 378254885Sdumbbell 379254885Sdumbbell retry: 380254885Sdumbbell radeon_ttm_placement_from_domain(bo, domain); 381254885Sdumbbell r = ttm_bo_validate(&bo->tbo, &bo->placement, 382254885Sdumbbell true, false); 383254885Sdumbbell if (unlikely(r)) { 384254885Sdumbbell if (r != -ERESTARTSYS && domain == RADEON_GEM_DOMAIN_VRAM) { 385254885Sdumbbell domain |= RADEON_GEM_DOMAIN_GTT; 386254885Sdumbbell goto retry; 387254885Sdumbbell } 388254885Sdumbbell return r; 389254885Sdumbbell } 390254885Sdumbbell } 391254885Sdumbbell lobj->gpu_offset = radeon_bo_gpu_offset(bo); 392254885Sdumbbell lobj->tiling_flags = bo->tiling_flags; 393254885Sdumbbell } 394254885Sdumbbell return 0; 395254885Sdumbbell} 396254885Sdumbbell 397280183Sdumbbell#ifdef FREEBSD_WIP 398254885Sdumbbellint radeon_bo_fbdev_mmap(struct radeon_bo *bo, 399254885Sdumbbell struct vm_area_struct *vma) 400254885Sdumbbell{ 401254885Sdumbbell return ttm_fbdev_mmap(vma, &bo->tbo); 402254885Sdumbbell} 403280183Sdumbbell#endif /* FREEBSD_WIP */ 404254885Sdumbbell 405254885Sdumbbellint radeon_bo_get_surface_reg(struct radeon_bo *bo) 406254885Sdumbbell{ 407254885Sdumbbell struct radeon_device *rdev = bo->rdev; 408254885Sdumbbell struct radeon_surface_reg *reg; 409254885Sdumbbell struct radeon_bo *old_object; 410254885Sdumbbell int steal; 411254885Sdumbbell int i; 412254885Sdumbbell 413254885Sdumbbell KASSERT(radeon_bo_is_reserved(bo), 414254885Sdumbbell ("radeon_bo_get_surface_reg: radeon_bo is not reserved")); 415254885Sdumbbell 416254885Sdumbbell if (!bo->tiling_flags) 417254885Sdumbbell return 0; 418254885Sdumbbell 419254885Sdumbbell if (bo->surface_reg >= 0) { 420254885Sdumbbell reg = &rdev->surface_regs[bo->surface_reg]; 421254885Sdumbbell i = bo->surface_reg; 422254885Sdumbbell goto out; 423254885Sdumbbell } 424254885Sdumbbell 425254885Sdumbbell steal = -1; 426254885Sdumbbell for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) { 427254885Sdumbbell 428254885Sdumbbell reg = &rdev->surface_regs[i]; 
429254885Sdumbbell if (!reg->bo) 430254885Sdumbbell break; 431254885Sdumbbell 432254885Sdumbbell old_object = reg->bo; 433254885Sdumbbell if (old_object->pin_count == 0) 434254885Sdumbbell steal = i; 435254885Sdumbbell } 436254885Sdumbbell 437254885Sdumbbell /* if we are all out */ 438254885Sdumbbell if (i == RADEON_GEM_MAX_SURFACES) { 439254885Sdumbbell if (steal == -1) 440254885Sdumbbell return -ENOMEM; 441254885Sdumbbell /* find someone with a surface reg and nuke their BO */ 442254885Sdumbbell reg = &rdev->surface_regs[steal]; 443254885Sdumbbell old_object = reg->bo; 444254885Sdumbbell /* blow away the mapping */ 445254885Sdumbbell DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object); 446254885Sdumbbell ttm_bo_unmap_virtual(&old_object->tbo); 447254885Sdumbbell old_object->surface_reg = -1; 448254885Sdumbbell i = steal; 449254885Sdumbbell } 450254885Sdumbbell 451254885Sdumbbell bo->surface_reg = i; 452254885Sdumbbell reg->bo = bo; 453254885Sdumbbell 454254885Sdumbbellout: 455254885Sdumbbell radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch, 456254885Sdumbbell bo->tbo.mem.start << PAGE_SHIFT, 457254885Sdumbbell bo->tbo.num_pages << PAGE_SHIFT); 458254885Sdumbbell return 0; 459254885Sdumbbell} 460254885Sdumbbell 461254885Sdumbbellstatic void radeon_bo_clear_surface_reg(struct radeon_bo *bo) 462254885Sdumbbell{ 463254885Sdumbbell struct radeon_device *rdev = bo->rdev; 464254885Sdumbbell struct radeon_surface_reg *reg; 465254885Sdumbbell 466254885Sdumbbell if (bo->surface_reg == -1) 467254885Sdumbbell return; 468254885Sdumbbell 469254885Sdumbbell reg = &rdev->surface_regs[bo->surface_reg]; 470254885Sdumbbell radeon_clear_surface_reg(rdev, bo->surface_reg); 471254885Sdumbbell 472254885Sdumbbell reg->bo = NULL; 473254885Sdumbbell bo->surface_reg = -1; 474254885Sdumbbell} 475254885Sdumbbell 476254885Sdumbbellint radeon_bo_set_tiling_flags(struct radeon_bo *bo, 477254885Sdumbbell uint32_t tiling_flags, uint32_t pitch) 478254885Sdumbbell{ 
479254885Sdumbbell struct radeon_device *rdev = bo->rdev; 480254885Sdumbbell int r; 481254885Sdumbbell 482254885Sdumbbell if (rdev->family >= CHIP_CEDAR) { 483254885Sdumbbell unsigned bankw, bankh, mtaspect, tilesplit, stilesplit; 484254885Sdumbbell 485254885Sdumbbell bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK; 486254885Sdumbbell bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK; 487254885Sdumbbell mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK; 488254885Sdumbbell tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK; 489254885Sdumbbell stilesplit = (tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK; 490254885Sdumbbell switch (bankw) { 491254885Sdumbbell case 0: 492254885Sdumbbell case 1: 493254885Sdumbbell case 2: 494254885Sdumbbell case 4: 495254885Sdumbbell case 8: 496254885Sdumbbell break; 497254885Sdumbbell default: 498254885Sdumbbell return -EINVAL; 499254885Sdumbbell } 500254885Sdumbbell switch (bankh) { 501254885Sdumbbell case 0: 502254885Sdumbbell case 1: 503254885Sdumbbell case 2: 504254885Sdumbbell case 4: 505254885Sdumbbell case 8: 506254885Sdumbbell break; 507254885Sdumbbell default: 508254885Sdumbbell return -EINVAL; 509254885Sdumbbell } 510254885Sdumbbell switch (mtaspect) { 511254885Sdumbbell case 0: 512254885Sdumbbell case 1: 513254885Sdumbbell case 2: 514254885Sdumbbell case 4: 515254885Sdumbbell case 8: 516254885Sdumbbell break; 517254885Sdumbbell default: 518254885Sdumbbell return -EINVAL; 519254885Sdumbbell } 520254885Sdumbbell if (tilesplit > 6) { 521254885Sdumbbell return -EINVAL; 522254885Sdumbbell } 523254885Sdumbbell if (stilesplit > 6) { 524254885Sdumbbell return -EINVAL; 525254885Sdumbbell } 526254885Sdumbbell } 527254885Sdumbbell r = radeon_bo_reserve(bo, false); 528254885Sdumbbell if (unlikely(r != 
0)) 529254885Sdumbbell return r; 530254885Sdumbbell bo->tiling_flags = tiling_flags; 531254885Sdumbbell bo->pitch = pitch; 532254885Sdumbbell radeon_bo_unreserve(bo); 533254885Sdumbbell return 0; 534254885Sdumbbell} 535254885Sdumbbell 536254885Sdumbbellvoid radeon_bo_get_tiling_flags(struct radeon_bo *bo, 537254885Sdumbbell uint32_t *tiling_flags, 538254885Sdumbbell uint32_t *pitch) 539254885Sdumbbell{ 540254885Sdumbbell KASSERT(radeon_bo_is_reserved(bo), 541254885Sdumbbell ("radeon_bo_get_tiling_flags: radeon_bo is not reserved")); 542254885Sdumbbell if (tiling_flags) 543254885Sdumbbell *tiling_flags = bo->tiling_flags; 544254885Sdumbbell if (pitch) 545254885Sdumbbell *pitch = bo->pitch; 546254885Sdumbbell} 547254885Sdumbbell 548254885Sdumbbellint radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved, 549254885Sdumbbell bool force_drop) 550254885Sdumbbell{ 551254885Sdumbbell KASSERT((radeon_bo_is_reserved(bo) || force_drop), 552254885Sdumbbell ("radeon_bo_check_tiling: radeon_bo is not reserved && !force_drop")); 553254885Sdumbbell 554254885Sdumbbell if (!(bo->tiling_flags & RADEON_TILING_SURFACE)) 555254885Sdumbbell return 0; 556254885Sdumbbell 557254885Sdumbbell if (force_drop) { 558254885Sdumbbell radeon_bo_clear_surface_reg(bo); 559254885Sdumbbell return 0; 560254885Sdumbbell } 561254885Sdumbbell 562254885Sdumbbell if (bo->tbo.mem.mem_type != TTM_PL_VRAM) { 563254885Sdumbbell if (!has_moved) 564254885Sdumbbell return 0; 565254885Sdumbbell 566254885Sdumbbell if (bo->surface_reg >= 0) 567254885Sdumbbell radeon_bo_clear_surface_reg(bo); 568254885Sdumbbell return 0; 569254885Sdumbbell } 570254885Sdumbbell 571254885Sdumbbell if ((bo->surface_reg >= 0) && !has_moved) 572254885Sdumbbell return 0; 573254885Sdumbbell 574254885Sdumbbell return radeon_bo_get_surface_reg(bo); 575254885Sdumbbell} 576254885Sdumbbell 577254885Sdumbbellvoid radeon_bo_move_notify(struct ttm_buffer_object *bo, 578254885Sdumbbell struct ttm_mem_reg *mem) 579254885Sdumbbell{ 
580254885Sdumbbell struct radeon_bo *rbo; 581254885Sdumbbell if (!radeon_ttm_bo_is_radeon_bo(bo)) 582254885Sdumbbell return; 583254885Sdumbbell rbo = container_of(bo, struct radeon_bo, tbo); 584254885Sdumbbell radeon_bo_check_tiling(rbo, 0, 1); 585254885Sdumbbell radeon_vm_bo_invalidate(rbo->rdev, rbo); 586254885Sdumbbell} 587254885Sdumbbell 588254885Sdumbbellint radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) 589254885Sdumbbell{ 590254885Sdumbbell struct radeon_device *rdev; 591254885Sdumbbell struct radeon_bo *rbo; 592254885Sdumbbell unsigned long offset, size; 593254885Sdumbbell int r; 594254885Sdumbbell 595254885Sdumbbell if (!radeon_ttm_bo_is_radeon_bo(bo)) 596254885Sdumbbell return 0; 597254885Sdumbbell rbo = container_of(bo, struct radeon_bo, tbo); 598254885Sdumbbell radeon_bo_check_tiling(rbo, 0, 0); 599254885Sdumbbell rdev = rbo->rdev; 600254885Sdumbbell if (bo->mem.mem_type == TTM_PL_VRAM) { 601254885Sdumbbell size = bo->mem.num_pages << PAGE_SHIFT; 602254885Sdumbbell offset = bo->mem.start << PAGE_SHIFT; 603254885Sdumbbell if ((offset + size) > rdev->mc.visible_vram_size) { 604254885Sdumbbell /* hurrah the memory is not visible ! 
*/ 605254885Sdumbbell radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM); 606254885Sdumbbell rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT; 607254885Sdumbbell r = ttm_bo_validate(bo, &rbo->placement, false, false); 608254885Sdumbbell if (unlikely(r != 0)) 609254885Sdumbbell return r; 610254885Sdumbbell offset = bo->mem.start << PAGE_SHIFT; 611254885Sdumbbell /* this should not happen */ 612254885Sdumbbell if ((offset + size) > rdev->mc.visible_vram_size) 613254885Sdumbbell return -EINVAL; 614254885Sdumbbell } 615254885Sdumbbell } 616254885Sdumbbell return 0; 617254885Sdumbbell} 618254885Sdumbbell 619254885Sdumbbellint radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait) 620254885Sdumbbell{ 621254885Sdumbbell int r; 622254885Sdumbbell 623254885Sdumbbell r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0); 624254885Sdumbbell if (unlikely(r != 0)) 625254885Sdumbbell return r; 626254885Sdumbbell mtx_lock(&bo->tbo.bdev->fence_lock); 627254885Sdumbbell if (mem_type) 628254885Sdumbbell *mem_type = bo->tbo.mem.mem_type; 629254885Sdumbbell if (bo->tbo.sync_obj) 630254885Sdumbbell r = ttm_bo_wait(&bo->tbo, true, true, no_wait); 631254885Sdumbbell mtx_unlock(&bo->tbo.bdev->fence_lock); 632254885Sdumbbell ttm_bo_unreserve(&bo->tbo); 633254885Sdumbbell return r; 634254885Sdumbbell} 635254885Sdumbbell 636254885Sdumbbell 637254885Sdumbbell/** 638254885Sdumbbell * radeon_bo_reserve - reserve bo 639254885Sdumbbell * @bo: bo structure 640254885Sdumbbell * @no_intr: don't return -ERESTARTSYS on pending signal 641254885Sdumbbell * 642254885Sdumbbell * Returns: 643254885Sdumbbell * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by 644254885Sdumbbell * a signal. Release all buffer reservations and return to user-space. 
645254885Sdumbbell */ 646254885Sdumbbellint radeon_bo_reserve(struct radeon_bo *bo, bool no_intr) 647254885Sdumbbell{ 648254885Sdumbbell int r; 649254885Sdumbbell 650254885Sdumbbell r = ttm_bo_reserve(&bo->tbo, !no_intr, false, false, 0); 651254885Sdumbbell if (unlikely(r != 0)) { 652254885Sdumbbell if (r != -ERESTARTSYS) 653254885Sdumbbell dev_err(bo->rdev->dev, "%p reserve failed\n", bo); 654254885Sdumbbell return r; 655254885Sdumbbell } 656254885Sdumbbell return 0; 657254885Sdumbbell} 658