/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */

#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/pagemap.h>
#include <linux/sched/task.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/dma-buf.h>
#include <linux/sizes.h>
#include <linux/module.h>

#include <drm/drm_drv.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_range_manager.h>
#include <drm/ttm/ttm_tt.h>

#include <drm/amdgpu_drm.h>

#include "amdgpu.h"
#include "amdgpu_object.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_sdma.h"
#include "amdgpu_ras.h"
#include "amdgpu_hmm.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_res_cursor.h"
#include "bif/bif_4_1_d.h"

MODULE_IMPORT_NS(DMA_BUF);

#define AMDGPU_TTM_VRAM_MAX_DW_READ	((size_t)128)

static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
				   struct ttm_tt *ttm,
				   struct ttm_resource *bo_mem);
static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
				      struct ttm_tt *ttm);

static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev,
				   unsigned int type,
				   uint64_t size_in_page)
{
	return ttm_range_man_init(&adev->mman.bdev, type,
				  false, size_in_page);
}

/**
 * amdgpu_evict_flags - Compute placement flags
 *
 * @bo: The buffer object to evict
 * @placement: Possible destination(s) for evicted BO
 *
 * Fill in placement data when ttm_bo_evict() is called
 */
static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
			       struct ttm_placement *placement)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo;
	static const struct ttm_place placements = {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_SYSTEM,
		.flags = 0
	};

	/* Don't handle scatter gather BOs */
	if (bo->type == ttm_bo_type_sg) {
		placement->num_placement = 0;
		placement->num_busy_placement = 0;
		return;
	}

	/* Object isn't an AMDGPU object so ignore */
	if (!amdgpu_bo_is_amdgpu_bo(bo)) {
		placement->placement = &placements;
		placement->busy_placement = &placements;
		placement->num_placement = 1;
		placement->num_busy_placement = 1;
		return;
	}

	abo = ttm_to_amdgpu_bo(bo);
	if (abo->flags & AMDGPU_GEM_CREATE_DISCARDABLE) {
		placement->num_placement = 0;
		placement->num_busy_placement = 0;
		return;
	}

	switch (bo->resource->mem_type) {
	case AMDGPU_PL_GDS:
	case AMDGPU_PL_GWS:
	case AMDGPU_PL_OA:
	case AMDGPU_PL_DOORBELL:
		placement->num_placement = 0;
		placement->num_busy_placement = 0;
		return;

	case TTM_PL_VRAM:
		if (!adev->mman.buffer_funcs_enabled) {
			/* Move to system memory */
			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
		} else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
			   !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
			   amdgpu_res_cpu_visible(adev, bo->resource)) {

			/* Try evicting to the CPU inaccessible part of VRAM
			 * first, but only set GTT as busy placement, so this
			 * BO will be evicted to GTT rather than causing other
			 * BOs to be evicted from VRAM
			 */
			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
							AMDGPU_GEM_DOMAIN_GTT |
							AMDGPU_GEM_DOMAIN_CPU);
			abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
			abo->placements[0].lpfn = 0;
			abo->placement.busy_placement = &abo->placements[1];
			abo->placement.num_busy_placement = 1;
		} else {
			/* Move to GTT memory */
			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT |
							AMDGPU_GEM_DOMAIN_CPU);
		}
		break;
	case TTM_PL_TT:
	case AMDGPU_PL_PREEMPT:
	default:
		amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
		break;
	}
	*placement = abo->placement;
}

/**
 * amdgpu_ttm_map_buffer - Map memory into the GART windows
 * @bo: buffer object to map
 * @mem: memory object to map
 * @mm_cur: range to map
 * @window: which GART window to use
 * @ring: DMA ring to use for the copy
 * @tmz: if we should setup a TMZ enabled mapping
 * @size: in number of bytes to map, out number of bytes mapped
 * @addr: resulting address inside the MC address space
 *
 * Setup one of the GART windows to access a specific piece of memory or return
 * the physical address for local memory.
 */
static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
				 struct ttm_resource *mem,
				 struct amdgpu_res_cursor *mm_cur,
				 unsigned int window, struct amdgpu_ring *ring,
				 bool tmz, uint64_t *size, uint64_t *addr)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned int offset, num_pages, num_dw, num_bytes;
	uint64_t src_addr, dst_addr;
	struct amdgpu_job *job;
	void *cpu_addr;
	uint64_t flags;
	unsigned int i;
	int r;

	BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
	       AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);

	if (WARN_ON(mem->mem_type == AMDGPU_PL_PREEMPT))
		return -EINVAL;

	/* Map only what can't be accessed directly */
	if (!tmz && mem->start != AMDGPU_BO_INVALID_OFFSET) {
		*addr = amdgpu_ttm_domain_start(adev, mem->mem_type) +
			mm_cur->start;
		return 0;
	}


	/*
	 * If start begins at an offset inside the page, then adjust the size
	 * and addr accordingly
	 */
	offset = mm_cur->start & ~LINUX_PAGE_MASK;

	num_pages = PFN_UP(*size + offset);
	num_pages = min_t(uint32_t, num_pages, AMDGPU_GTT_MAX_TRANSFER_SIZE);

	*size = min(*size, (uint64_t)num_pages * PAGE_SIZE - offset);

	*addr = adev->gmc.gart_start;
	*addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
		AMDGPU_GPU_PAGE_SIZE;
	*addr += offset;

	num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
	num_bytes = num_pages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE;

	r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr,
				     AMDGPU_FENCE_OWNER_UNDEFINED,
				     num_dw * 4 + num_bytes,
				     AMDGPU_IB_POOL_DELAYED, &job);
	if (r)
		return r;

	src_addr = num_dw * 4;
	src_addr += job->ibs[0].gpu_addr;

	dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
	dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
				dst_addr, num_bytes, false);

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);

	flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, mem);
	if (tmz)
		flags |= AMDGPU_PTE_TMZ;

	cpu_addr = &job->ibs[0].ptr[num_dw];

	if (mem->mem_type == TTM_PL_TT) {
		dma_addr_t *dma_addr;

		dma_addr = &bo->ttm->dma_address[mm_cur->start >> PAGE_SHIFT];
		amdgpu_gart_map(adev, 0, num_pages, dma_addr, flags, cpu_addr);
	} else {
		dma_addr_t dma_address;

		dma_address = mm_cur->start;
		dma_address += adev->vm_manager.vram_base_offset;

		for (i = 0; i < num_pages; ++i) {
			amdgpu_gart_map(adev, i << PAGE_SHIFT, 1, &dma_address,
					flags, cpu_addr);
			dma_address += PAGE_SIZE;
		}
	}

	dma_fence_put(amdgpu_job_submit(job));
	return 0;
}

/**
 * amdgpu_ttm_copy_mem_to_mem - Helper function for copy
 * @adev: amdgpu device
 * @src: buffer/address where to read from
 * @dst: buffer/address where to write to
 * @size: number of bytes to copy
 * @tmz: if a secure copy should be used
 * @resv: resv object to sync to
 * @f: Returns the last fence if multiple jobs are submitted.
 *
 * The function copies @size bytes from {src->mem + src->offset} to
 * {dst->mem + dst->offset}. src->bo and dst->bo could be same BO for a
 * move and different for a BO to BO copy.
 *
 */
int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
			       const struct amdgpu_copy_mem *src,
			       const struct amdgpu_copy_mem *dst,
			       uint64_t size, bool tmz,
			       struct dma_resv *resv,
			       struct dma_fence **f)
{
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	struct amdgpu_res_cursor src_mm, dst_mm;
	struct dma_fence *fence = NULL;
	int r = 0;

	if (!adev->mman.buffer_funcs_enabled) {
		DRM_ERROR("Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

	amdgpu_res_first(src->mem, src->offset, size, &src_mm);
	amdgpu_res_first(dst->mem, dst->offset, size, &dst_mm);

	mutex_lock(&adev->mman.gtt_window_lock);
	while (src_mm.remaining) {
		uint64_t from, to, cur_size;
		struct dma_fence *next;

		/* Never copy more than 256MiB at once to avoid a timeout */
		cur_size = min3(src_mm.size, dst_mm.size, 256ULL << 20);

		/* Map src to window 0 and dst to window 1. */
		r = amdgpu_ttm_map_buffer(src->bo, src->mem, &src_mm,
					  0, ring, tmz, &cur_size, &from);
		if (r)
			goto error;

		r = amdgpu_ttm_map_buffer(dst->bo, dst->mem, &dst_mm,
					  1, ring, tmz, &cur_size, &to);
		if (r)
			goto error;

		r = amdgpu_copy_buffer(ring, from, to, cur_size,
				       resv, &next, false, true, tmz);
		if (r)
			goto error;

		dma_fence_put(fence);
		fence = next;

		amdgpu_res_next(&src_mm, cur_size);
		amdgpu_res_next(&dst_mm, cur_size);
	}
error:
	mutex_unlock(&adev->mman.gtt_window_lock);
	if (f)
		*f = dma_fence_get(fence);
	dma_fence_put(fence);
	return r;
}

/*
 * amdgpu_move_blit - Copy an entire buffer to another buffer
 *
 * This is a helper called by amdgpu_bo_move() and amdgpu_move_vram_ram() to
 * help move buffers to and from VRAM.
 */
static int amdgpu_move_blit(struct ttm_buffer_object *bo,
			    bool evict,
			    struct ttm_resource *new_mem,
			    struct ttm_resource *old_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	struct amdgpu_copy_mem src, dst;
	struct dma_fence *fence = NULL;
	int r;

	src.bo = bo;
	dst.bo = bo;
	src.mem = old_mem;
	dst.mem = new_mem;
	src.offset = 0;
	dst.offset = 0;

	r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
				       new_mem->size,
				       amdgpu_bo_encrypted(abo),
				       bo->base.resv, &fence);
	if (r)
		goto error;

	/* clear the space being freed */
	if (old_mem->mem_type == TTM_PL_VRAM &&
	    (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
		struct dma_fence *wipe_fence = NULL;

		r = amdgpu_fill_buffer(abo, AMDGPU_POISON, NULL, &wipe_fence,
				       false);
		if (r) {
			goto error;
		} else if (wipe_fence) {
			dma_fence_put(fence);
			fence = wipe_fence;
		}
	}

	/* Always block for VM page tables before committing the new location */
	if (bo->type == ttm_bo_type_kernel)
		r = ttm_bo_move_accel_cleanup(bo, fence, true, false, new_mem);
	else
		r = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);
	dma_fence_put(fence);
	return r;

error:
	if (fence)
		dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return r;
}

/**
 * amdgpu_res_cpu_visible - Check that resource can be accessed by CPU
 * @adev: amdgpu device
 * @res: the resource to check
 *
 * Returns: true if the full resource is CPU visible, false otherwise.
 */
bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
			    struct ttm_resource *res)
{
	struct amdgpu_res_cursor cursor;

	if (!res)
		return false;

	if (res->mem_type == TTM_PL_SYSTEM || res->mem_type == TTM_PL_TT ||
	    res->mem_type == AMDGPU_PL_PREEMPT || res->mem_type == AMDGPU_PL_DOORBELL)
		return true;

	if (res->mem_type != TTM_PL_VRAM)
		return false;

	amdgpu_res_first(res, 0, res->size, &cursor);
	while (cursor.remaining) {
		if ((cursor.start + cursor.size) > adev->gmc.visible_vram_size)
			return false;
		amdgpu_res_next(&cursor, cursor.size);
	}

	return true;
}

/*
 * amdgpu_res_copyable - Check that memory can be accessed by ttm_bo_move_memcpy
 *
 * Called by amdgpu_bo_move()
 */
static bool amdgpu_res_copyable(struct amdgpu_device *adev,
				struct ttm_resource *mem)
{
	if (!amdgpu_res_cpu_visible(adev, mem))
		return false;

	/* ttm_resource_ioremap only supports contiguous memory */
	if (mem->mem_type == TTM_PL_VRAM &&
	    !(mem->placement & TTM_PL_FLAG_CONTIGUOUS))
		return false;

	return true;
}

/*
 * amdgpu_bo_move - Move a buffer object to a new memory location
 *
 * Called by ttm_bo_handle_move_mem()
 */
static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
			  struct ttm_operation_ctx *ctx,
			  struct ttm_resource *new_mem,
			  struct ttm_place *hop)
{
	struct amdgpu_device *adev;
	struct amdgpu_bo *abo;
	struct ttm_resource *old_mem = bo->resource;
	int r;

	if (new_mem->mem_type == TTM_PL_TT ||
	    new_mem->mem_type == AMDGPU_PL_PREEMPT) {
		r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, new_mem);
		if (r)
			return r;
	}

	abo = ttm_to_amdgpu_bo(bo);
	adev = amdgpu_ttm_adev(bo->bdev);

	if (!old_mem || (old_mem->mem_type == TTM_PL_SYSTEM &&
			 bo->ttm == NULL)) {
		amdgpu_bo_move_notify(bo, evict, new_mem);
		ttm_bo_move_null(bo, new_mem);
		return 0;
	}
	if (old_mem->mem_type == TTM_PL_SYSTEM &&
	    (new_mem->mem_type == TTM_PL_TT ||
	     new_mem->mem_type == AMDGPU_PL_PREEMPT)) {
		amdgpu_bo_move_notify(bo, evict, new_mem);
		ttm_bo_move_null(bo, new_mem);
		return 0;
	}
	if ((old_mem->mem_type == TTM_PL_TT ||
	     old_mem->mem_type == AMDGPU_PL_PREEMPT) &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		r = ttm_bo_wait_ctx(bo, ctx);
		if (r)
			return r;

		amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);
		amdgpu_bo_move_notify(bo, evict, new_mem);
		ttm_resource_free(bo, &bo->resource);
		ttm_bo_assign_mem(bo, new_mem);
		return 0;
	}

	if (old_mem->mem_type == AMDGPU_PL_GDS ||
	    old_mem->mem_type == AMDGPU_PL_GWS ||
	    old_mem->mem_type == AMDGPU_PL_OA ||
	    old_mem->mem_type == AMDGPU_PL_DOORBELL ||
	    new_mem->mem_type == AMDGPU_PL_GDS ||
	    new_mem->mem_type == AMDGPU_PL_GWS ||
	    new_mem->mem_type == AMDGPU_PL_OA ||
	    new_mem->mem_type == AMDGPU_PL_DOORBELL) {
		/* Nothing to save here */
		amdgpu_bo_move_notify(bo, evict, new_mem);
		ttm_bo_move_null(bo, new_mem);
		return 0;
	}

	if (bo->type == ttm_bo_type_device &&
	    new_mem->mem_type == TTM_PL_VRAM &&
	    old_mem->mem_type != TTM_PL_VRAM) {
		/* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
		 * accesses the BO after it's moved.
		 */
		abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	}

	if (adev->mman.buffer_funcs_enabled &&
	    ((old_mem->mem_type == TTM_PL_SYSTEM &&
	      new_mem->mem_type == TTM_PL_VRAM) ||
	     (old_mem->mem_type == TTM_PL_VRAM &&
	      new_mem->mem_type == TTM_PL_SYSTEM))) {
		hop->fpfn = 0;
		hop->lpfn = 0;
		hop->mem_type = TTM_PL_TT;
		hop->flags = TTM_PL_FLAG_TEMPORARY;
		return -EMULTIHOP;
	}

	amdgpu_bo_move_notify(bo, evict, new_mem);
	if (adev->mman.buffer_funcs_enabled)
		r = amdgpu_move_blit(bo, evict, new_mem, old_mem);
	else
		r = -ENODEV;

	if (r) {
		/* Check that all memory is CPU accessible */
		if (!amdgpu_res_copyable(adev, old_mem) ||
		    !amdgpu_res_copyable(adev, new_mem)) {
			pr_err("Move buffer fallback to memcpy unavailable\n");
			return r;
		}

		r = ttm_bo_move_memcpy(bo, ctx, new_mem);
		if (r)
			return r;
	}

	/* update statistics after the move */
	if (evict)
		atomic64_inc(&adev->num_evictions);
	atomic64_add(bo->base.size, &adev->num_bytes_moved);
	return 0;
}

/*
 * amdgpu_ttm_io_mem_reserve - Reserve a block of memory during a fault
 *
 * Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault()
 */
static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
				     struct ttm_resource *mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);

	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_TT:
	case AMDGPU_PL_PREEMPT:
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;

		if (adev->mman.aper_base_kaddr &&
		    mem->placement & TTM_PL_FLAG_CONTIGUOUS)
			mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
					mem->bus.offset;

		mem->bus.offset += adev->gmc.aper_base;
		mem->bus.is_iomem = true;
		break;
	case AMDGPU_PL_DOORBELL:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.offset += adev->doorbell.base;
		mem->bus.is_iomem = true;
		mem->bus.caching = ttm_uncached;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
					   unsigned long page_offset)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_res_cursor cursor;

	amdgpu_res_first(bo->resource, (u64)page_offset << PAGE_SHIFT, 0,
			 &cursor);

	if (bo->resource->mem_type == AMDGPU_PL_DOORBELL)
		return ((uint64_t)(adev->doorbell.base + cursor.start)) >> PAGE_SHIFT;

	return (adev->gmc.aper_base + cursor.start) >> PAGE_SHIFT;
}

/**
 * amdgpu_ttm_domain_start - Returns GPU start address
 * @adev: amdgpu device object
 * @type: type of the memory
 *
 * Returns:
 * GPU start address of a memory domain
 */

uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type)
{
	switch (type) {
	case TTM_PL_TT:
		return adev->gmc.gart_start;
	case TTM_PL_VRAM:
		return adev->gmc.vram_start;
	}

	return 0;
}

/*
 * TTM backend functions.
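 *
 * struct amdgpu_ttm_tt below is the driver-private TTM backend object: besides
 * the embedded struct ttm_tt it tracks the GEM object and GART offset the
 * pages are bound at, the userptr address, owning task and flags for userptr
 * BOs, whether the tt is currently bound, and the memory-partition pool id
 * used when allocating its pages.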
 */
struct amdgpu_ttm_tt {
	struct ttm_tt	ttm;
	struct drm_gem_object	*gobj;
	u64			offset;
	uint64_t		userptr;
	struct task_struct	*usertask;
	uint32_t		userflags;
	bool			bound;
	int32_t			pool_id;
};

#define ttm_to_amdgpu_ttm_tt(ptr)	container_of(ptr, struct amdgpu_ttm_tt, ttm)

#ifdef CONFIG_DRM_AMDGPU_USERPTR
/*
 * amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user
 * memory and start HMM tracking CPU page table update
 *
 * Calling function must call amdgpu_ttm_tt_userptr_range_done() once and only
 * once afterwards to stop HMM tracking
 */
int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct vm_page **pages,
				 struct hmm_range **range)
{
	struct ttm_tt *ttm = bo->tbo.ttm;
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
	unsigned long start = gtt->userptr;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	bool readonly;
	int r = 0;

	/* Make sure get_user_pages_done() can cleanup gracefully */
	*range = NULL;

	mm = bo->notifier.mm;
	if (unlikely(!mm)) {
		DRM_DEBUG_DRIVER("BO is not registered?\n");
		return -EFAULT;
	}

	if (!mmget_not_zero(mm)) /* Happens during process shutdown */
		return -ESRCH;

	mmap_read_lock(mm);
	vma = vma_lookup(mm, start);
	if (unlikely(!vma)) {
		r = -EFAULT;
		goto out_unlock;
	}
	if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
		     vma->vm_file)) {
		r = -EPERM;
		goto out_unlock;
	}

	readonly = amdgpu_ttm_tt_is_readonly(ttm);
	r = amdgpu_hmm_range_get_pages(&bo->notifier, start, ttm->num_pages,
				       readonly, NULL, pages, range);
out_unlock:
	mmap_read_unlock(mm);
	if (r)
		pr_debug("failed %d to get user pages 0x%lx\n", r, start);

	mmput(mm);

	return r;
}

/* amdgpu_ttm_tt_discard_user_pages - Discard range and pfn array allocations
 */
void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm,
				      struct hmm_range *range)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt && gtt->userptr && range)
		amdgpu_hmm_range_get_pages_done(range);
}

/*
 * amdgpu_ttm_tt_get_user_pages_done - stop HMM tracking CPU page table changes
 * Check if the pages backing this ttm range have been invalidated
 *
 * Returns: true if pages are still valid
 */
bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
				       struct hmm_range *range)
{
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);

	if (!gtt || !gtt->userptr || !range)
		return false;

	DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%x\n",
		gtt->userptr, ttm->num_pages);

	WARN_ONCE(!range->hmm_pfns, "No user pages to check\n");

	return !amdgpu_hmm_range_get_pages_done(range);
}
#endif

/*
 * amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages as necessary.
 *
 * Called by amdgpu_cs_list_validate(). This creates the page list
 * that backs user memory and will ultimately be mapped into the device
 * address space.
 */
void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct vm_page **pages)
{
	unsigned long i;

	for (i = 0; i < ttm->num_pages; ++i)
		ttm->pages[i] = pages ?
			pages[i] : NULL;
}

/*
 * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the user pages
 *
 * Called by amdgpu_ttm_backend_bind()
 **/
static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev,
				     struct ttm_tt *ttm)
{
	STUB();
	return -ENOSYS;
#ifdef notyet
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
	int r;

	/* Allocate an SG array and squash pages into it */
	r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
				      (u64)ttm->num_pages << PAGE_SHIFT,
				      GFP_KERNEL);
	if (r)
		goto release_sg;

	/* Map SG to device */
	r = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
	if (r)
		goto release_sg;

	/* convert SG to linear array of pages and dma addresses */
	drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
				       ttm->num_pages);

	return 0;

release_sg:
	kfree(ttm->sg);
	ttm->sg = NULL;
	return r;
#endif
}

/*
 * amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages
 */
static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev,
					struct ttm_tt *ttm)
{
	STUB();
#ifdef notyet
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	/* double check that we don't free the table twice */
	if (!ttm->sg || !ttm->sg->sgl)
		return;

	/* unmap the pages mapped to the device */
	dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
	sg_free_table(ttm->sg);
#endif
}

/*
 * total_pages is constructed as MQD0+CtrlStack0 + MQD1+CtrlStack1 + ...
 * MQDn+CtrlStackn where n is the number of XCCs per partition.
 * pages_per_xcc is the size of one MQD+CtrlStack. The first page is MQD
 * and uses memory type default, UC. The rest of pages_per_xcc are
 * Ctrl stack and modify their memory type to NC.
 */
static void amdgpu_ttm_gart_bind_gfx9_mqd(struct amdgpu_device *adev,
				struct ttm_tt *ttm, uint64_t flags)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	uint64_t total_pages = ttm->num_pages;
	int num_xcc = max(1U, adev->gfx.num_xcc_per_xcp);
	uint64_t page_idx, pages_per_xcc;
	int i;
	uint64_t ctrl_flags = (flags & ~AMDGPU_PTE_MTYPE_VG10_MASK) |
			AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC);

	pages_per_xcc = total_pages;
	do_div(pages_per_xcc, num_xcc);

	for (i = 0, page_idx = 0; i < num_xcc; i++, page_idx += pages_per_xcc) {
		/* MQD page: use default flags */
		amdgpu_gart_bind(adev,
				gtt->offset + (page_idx << PAGE_SHIFT),
				1, &gtt->ttm.dma_address[page_idx], flags);
		/*
		 * Ctrl pages - modify the memory type to NC (ctrl_flags) from
		 * the second page of the BO onward.
		 */
		amdgpu_gart_bind(adev,
				gtt->offset + ((page_idx + 1) << PAGE_SHIFT),
				pages_per_xcc - 1,
				&gtt->ttm.dma_address[page_idx + 1],
				ctrl_flags);
	}
}

static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
				 struct ttm_buffer_object *tbo,
				 uint64_t flags)
{
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
	struct ttm_tt *ttm = tbo->ttm;
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);

	if (amdgpu_bo_encrypted(abo))
		flags |= AMDGPU_PTE_TMZ;

	if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) {
		amdgpu_ttm_gart_bind_gfx9_mqd(adev, ttm, flags);
	} else {
		amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
				 gtt->ttm.dma_address, flags);
	}
	gtt->bound = true;
}

/*
 * amdgpu_ttm_backend_bind - Bind GTT memory
 *
 * Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem().
 * This handles binding GTT memory to the device address space.
 */
static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
				   struct ttm_tt *ttm,
				   struct ttm_resource *bo_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
	uint64_t flags;
	int r;

	if (!bo_mem)
		return -EINVAL;

	if (gtt->bound)
		return 0;

	if (gtt->userptr) {
		r = amdgpu_ttm_tt_pin_userptr(bdev, ttm);
		if (r) {
			DRM_ERROR("failed to pin userptr\n");
			return r;
		}
	} else if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) {
		if (!ttm->sg) {
			struct dma_buf_attachment *attach;
			struct sg_table *sgt;

			attach = gtt->gobj->import_attach;
#ifdef notyet
			sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
			if (IS_ERR(sgt))
				return PTR_ERR(sgt);
#else
			STUB();
			return -ENOSYS;
#endif

			ttm->sg = sgt;
		}

		drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
					       ttm->num_pages);
	}

	if (!ttm->num_pages) {
		WARN(1, "nothing to bind %u pages for mreg %p back %p!\n",
		     ttm->num_pages, bo_mem, ttm);
	}

	if (bo_mem->mem_type != TTM_PL_TT ||
	    !amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
		gtt->offset = AMDGPU_BO_INVALID_OFFSET;
		return 0;
	}

	/* compute PTE flags relevant to this BO memory */
	flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);

	/* bind pages into GART page tables */
	gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
	amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
			 gtt->ttm.dma_address, flags);
	gtt->bound = true;
	return 0;
}

/*
 * amdgpu_ttm_alloc_gart - Make sure buffer object is accessible either
 * through AGP or GART aperture.
 *
 * If bo is accessible through AGP aperture, then use AGP aperture
 * to access bo; otherwise allocate logical space in GART aperture
 * and map bo to GART aperture.
 */
int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(bo->ttm);
	struct ttm_placement placement;
	struct ttm_place placements;
	struct ttm_resource *tmp;
	uint64_t addr, flags;
	int r;

	if (bo->resource->start != AMDGPU_BO_INVALID_OFFSET)
		return 0;

	addr = amdgpu_gmc_agp_addr(bo);
	if (addr != AMDGPU_BO_INVALID_OFFSET) {
		bo->resource->start = addr >> PAGE_SHIFT;
		return 0;
	}

	/* allocate GART space */
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
	placements.mem_type = TTM_PL_TT;
	placements.flags = bo->resource->placement;

	r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
	if (unlikely(r))
		return r;

	/* compute PTE flags for this buffer object */
	flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, tmp);

	/* Bind pages */
	gtt->offset = (u64)tmp->start << PAGE_SHIFT;
	amdgpu_ttm_gart_bind(adev, bo, flags);
	amdgpu_gart_invalidate_tlb(adev);
	ttm_resource_free(bo, &bo->resource);
	ttm_bo_assign_mem(bo, tmp);

	return 0;
}

/*
 * amdgpu_ttm_recover_gart - Rebind GTT pages
 *
 * Called by amdgpu_gtt_mgr_recover() from amdgpu_device_reset() to
 * rebind GTT pages during a GPU reset.
 */
void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
	uint64_t flags;

	if (!tbo->ttm)
		return;

	flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, tbo->resource);
	amdgpu_ttm_gart_bind(adev, tbo, flags);
}

/*
 * amdgpu_ttm_backend_unbind - Unbind GTT mapped pages
 *
 * Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and
 * ttm_tt_destroy().
 */
static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
				      struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);

	/* if the pages have userptr pinning then clear that first */
	if (gtt->userptr) {
		amdgpu_ttm_tt_unpin_userptr(bdev, ttm);
	} else if (ttm->sg && gtt->gobj->import_attach) {
		struct dma_buf_attachment *attach;

		attach = gtt->gobj->import_attach;
#ifdef notyet
		dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL);
#else
		STUB();
#endif
		ttm->sg = NULL;
	}

	if (!gtt->bound)
		return;

	if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
		return;

	/* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
	amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
	gtt->bound = false;
}

static void amdgpu_ttm_backend_destroy(struct ttm_device *bdev,
				       struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);

#ifdef notyet
	if (gtt->usertask)
		put_task_struct(gtt->usertask);
#endif

	ttm_tt_fini(&gtt->ttm);
	kfree(gtt);
}

/**
 * amdgpu_ttm_tt_create - Create a ttm_tt object for a given BO
 *
 * @bo: The buffer object to create a GTT ttm_tt object around
 * @page_flags: Page flags to be added to the ttm_tt object
 *
 * Called by ttm_tt_create().
 */
static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
					   uint32_t page_flags)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	struct amdgpu_ttm_tt *gtt;
	enum ttm_caching caching;

	gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
	if (!gtt)
		return NULL;

	gtt->gobj = &bo->base;
	if (adev->gmc.mem_partitions && abo->xcp_id >= 0)
		gtt->pool_id = KFD_XCP_MEM_ID(adev, abo->xcp_id);
	else
		gtt->pool_id = abo->xcp_id;

	if (abo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
		caching = ttm_write_combined;
	else
		caching = ttm_cached;

	/* allocate space for the uninitialized page entries */
	if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags, caching)) {
		kfree(gtt);
		return NULL;
	}
	return &gtt->ttm;
}

/*
 * amdgpu_ttm_tt_populate - Map GTT pages visible to the device
 *
 * Map the pages of a ttm_tt object to an address space visible
 * to the underlying device.
 */
static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
				  struct ttm_tt *ttm,
				  struct ttm_operation_ctx *ctx)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
	struct ttm_pool *pool;
	pgoff_t i;
	int ret;

	/* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
	if (gtt->userptr) {
		ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
		if (!ttm->sg)
			return -ENOMEM;
		return 0;
	}

	if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
		return 0;

	if (adev->mman.ttm_pools && gtt->pool_id >= 0)
		pool = &adev->mman.ttm_pools[gtt->pool_id];
	else
		pool = &adev->mman.bdev.pool;
	ret = ttm_pool_alloc(pool, ttm, ctx);
	if (ret)
		return ret;

#ifdef notyet
	for (i = 0; i < ttm->num_pages; ++i)
		ttm->pages[i]->mapping = bdev->dev_mapping;
#endif

	return 0;
}

/*
 * amdgpu_ttm_tt_unpopulate - unmap GTT pages and unpopulate page arrays
 *
 * Unmaps pages of a ttm_tt object from the device address space and
 * unpopulates the page array backing it.
 */
static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
				     struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
	struct amdgpu_device *adev;
	struct ttm_pool *pool;
	pgoff_t i;
	struct vm_page *page;

	amdgpu_ttm_backend_unbind(bdev, ttm);

	if (gtt->userptr) {
		amdgpu_ttm_tt_set_user_pages(ttm, NULL);
		kfree(ttm->sg);
		ttm->sg = NULL;
		return;
	}

	if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
		return;

	for (i = 0; i < ttm->num_pages; ++i) {
		page = ttm->pages[i];
		if (unlikely(page == NULL))
			continue;
		pmap_page_protect(page, PROT_NONE);
	}

	adev = amdgpu_ttm_adev(bdev);

	if (adev->mman.ttm_pools && gtt->pool_id >= 0)
		pool = &adev->mman.ttm_pools[gtt->pool_id];
	else
		pool = &adev->mman.bdev.pool;

	return ttm_pool_free(pool, ttm);
}

/**
 * amdgpu_ttm_tt_get_userptr - Return the userptr GTT ttm_tt for the current
 * task
 *
 * @tbo: The ttm_buffer_object that contains the userptr
 * @user_addr: The returned value
 */
int amdgpu_ttm_tt_get_userptr(const struct ttm_buffer_object *tbo,
			      uint64_t *user_addr)
{
	struct amdgpu_ttm_tt *gtt;

	if (!tbo->ttm)
		return -EINVAL;

	gtt = (void *)tbo->ttm;
	*user_addr = gtt->userptr;
	return 0;
}

/**
 * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt for the current
 * task
 *
 * @bo: The ttm_buffer_object to bind this userptr to
 * @addr: The address in the current tasks VM space to use
 * @flags: Requirements of userptr object.
 *
 * Called by amdgpu_gem_userptr_ioctl() and kfd_ioctl_alloc_memory_of_gpu() to
 * bind userptr pages to current task and by kfd_ioctl_acquire_vm() to
 * initialize GPU VM for a KFD process.
 */
int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
			      uint64_t addr, uint32_t flags)
{
	struct amdgpu_ttm_tt *gtt;

	if (!bo->ttm) {
		/* TODO: We want a separate TTM object type for userptrs */
		bo->ttm = amdgpu_ttm_tt_create(bo, 0);
		if (bo->ttm == NULL)
			return -ENOMEM;
	}

	/* Set TTM_TT_FLAG_EXTERNAL before populate but after create.
	 */
	bo->ttm->page_flags |= TTM_TT_FLAG_EXTERNAL;

	gtt = ttm_to_amdgpu_ttm_tt(bo->ttm);
	gtt->userptr = addr;
	gtt->userflags = flags;

#ifdef notyet
	if (gtt->usertask)
		put_task_struct(gtt->usertask);
	gtt->usertask = current->group_leader;
	get_task_struct(gtt->usertask);
#endif

	return 0;
}

/*
 * amdgpu_ttm_tt_get_usermm - Return memory manager for ttm_tt object
 */
struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);

	if (gtt == NULL)
		return NULL;

	if (gtt->usertask == NULL)
		return NULL;

#ifdef notyet
	return gtt->usertask->mm;
#else
	STUB();
	return NULL;
#endif
}

/*
 * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lies inside an
 * address range for the current task.
 *
 */
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
				  unsigned long end, unsigned long *userptr)
{
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
	unsigned long size;

	if (gtt == NULL || !gtt->userptr)
		return false;

	/* Return false if no part of the ttm_tt object lies within
	 * the range
	 */
	size = (unsigned long)gtt->ttm.num_pages * PAGE_SIZE;
	if (gtt->userptr > end || gtt->userptr + size <= start)
		return false;

	if (userptr)
		*userptr = gtt->userptr;
	return true;
}

/*
 * amdgpu_ttm_tt_is_userptr - Are the pages backed by a userptr?
 */
bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);

	if (gtt == NULL || !gtt->userptr)
		return false;

	return true;
}

/*
 * amdgpu_ttm_tt_is_readonly - Is the ttm_tt object read only?
 */
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);

	if (gtt == NULL)
		return false;

	return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
}

/**
 * amdgpu_ttm_tt_pde_flags - Compute PDE flags for ttm_tt object
 *
 * @ttm: The ttm_tt object to compute the flags for
 * @mem: The memory registry backing this ttm_tt object
 *
 * Figure out the flags to use for a VM PDE (Page Directory Entry).
 */
uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
{
	uint64_t flags = 0;

	if (mem && mem->mem_type != TTM_PL_SYSTEM)
		flags |= AMDGPU_PTE_VALID;

	if (mem && (mem->mem_type == TTM_PL_TT ||
		    mem->mem_type == AMDGPU_PL_DOORBELL ||
		    mem->mem_type == AMDGPU_PL_PREEMPT)) {
		flags |= AMDGPU_PTE_SYSTEM;

		if (ttm->caching == ttm_cached)
			flags |= AMDGPU_PTE_SNOOPED;
	}

	if (mem && mem->mem_type == TTM_PL_VRAM &&
	    mem->bus.caching == ttm_cached)
		flags |= AMDGPU_PTE_SNOOPED;

	return flags;
}

/**
 * amdgpu_ttm_tt_pte_flags - Compute PTE flags for ttm_tt object
 *
 * @adev: amdgpu_device pointer
 * @ttm: The ttm_tt object to compute the flags for
 * @mem: The memory registry backing this ttm_tt object
 *
 * Figure out the flags to use for a VM PTE (Page Table Entry).
 */
uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
				 struct ttm_resource *mem)
{
	uint64_t flags = amdgpu_ttm_tt_pde_flags(ttm, mem);

	flags |= adev->gart.gart_pte_flags;
	flags |= AMDGPU_PTE_READABLE;

	if (!amdgpu_ttm_tt_is_readonly(ttm))
		flags |= AMDGPU_PTE_WRITEABLE;

	return flags;
}

/*
 * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict a buffer
 * object.
 *
 * Return true if eviction is sensible. Called by ttm_mem_evict_first() on
 * behalf of ttm_bo_mem_force_space() which tries to evict buffer objects until
 * it can find space for a new object and by ttm_bo_force_list_clean() which is
 * used to clean out a memory space.
 */
static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
					    const struct ttm_place *place)
{
	struct dma_resv_iter resv_cursor;
	struct dma_fence *f;

	if (!amdgpu_bo_is_amdgpu_bo(bo))
		return ttm_bo_eviction_valuable(bo, place);

	/* Swapout? */
	if (bo->resource->mem_type == TTM_PL_SYSTEM)
		return true;

	if (bo->type == ttm_bo_type_kernel &&
	    !amdgpu_vm_evictable(ttm_to_amdgpu_bo(bo)))
		return false;

	/* If bo is a KFD BO, check if the bo belongs to the current process.
	 * If true, then return false as any KFD process needs all its BOs to
	 * be resident to run successfully
	 */
	dma_resv_for_each_fence(&resv_cursor, bo->base.resv,
				DMA_RESV_USAGE_BOOKKEEP, f) {
#ifdef notyet
		if (amdkfd_fence_check_mm(f, current->mm))
			return false;
#endif
	}

	/* Preemptible BOs don't own system resources managed by the
	 * driver (pages, VRAM, GART space). They point to resources
	 * owned by someone else (e.g. pageable memory in user mode
	 * or a DMABuf). They are used in a preemptible context so we
	 * can guarantee no deadlocks and good QoS in case of MMU
	 * notifiers or DMABuf move notifiers from the resource owner.
	 */
	if (bo->resource->mem_type == AMDGPU_PL_PREEMPT)
		return false;

	if (bo->resource->mem_type == TTM_PL_TT &&
	    amdgpu_bo_encrypted(ttm_to_amdgpu_bo(bo)))
		return false;

	return ttm_bo_eviction_valuable(bo, place);
}

static void amdgpu_ttm_vram_mm_access(struct amdgpu_device *adev, loff_t pos,
				      void *buf, size_t size, bool write)
{
	STUB();
#ifdef notyet
	while (size) {
		uint64_t aligned_pos = ALIGN_DOWN(pos, 4);
		uint64_t bytes = 4 - (pos & 0x3);
		uint32_t shift = (pos & 0x3) * 8;
		uint32_t mask = 0xffffffff << shift;
		uint32_t value = 0;

		if (size < bytes) {
			mask &= 0xffffffff >> (bytes - size) * 8;
			bytes = size;
		}

		if (mask != 0xffffffff) {
			amdgpu_device_mm_access(adev, aligned_pos, &value, 4, false);
			if (write) {
				value &= ~mask;
				value |= (*(uint32_t *)buf << shift) & mask;
				amdgpu_device_mm_access(adev, aligned_pos, &value, 4, true);
			} else {
				value = (value & mask) >> shift;
				memcpy(buf, &value, bytes);
			}
		} else {
			amdgpu_device_mm_access(adev, aligned_pos, buf, 4, write);
		}

		pos += bytes;
		buf += bytes;
		size -= bytes;
	}
#endif
}

static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo,
					unsigned long offset, void *buf,
					int len, int write)
{
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct amdgpu_res_cursor src_mm;
	struct amdgpu_job *job;
	struct dma_fence *fence;
	uint64_t src_addr, dst_addr;
	unsigned int num_dw;
	int r, idx;

	if (len != PAGE_SIZE)
		return -EINVAL;

	if (!adev->mman.sdma_access_ptr)
		return -EACCES;

	if (!drm_dev_enter(adev_to_drm(adev), &idx))
		return -ENODEV;

	if (write)
		memcpy(adev->mman.sdma_access_ptr, buf, len);

	num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
	r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr,
				     AMDGPU_FENCE_OWNER_UNDEFINED,
				     num_dw * 4, AMDGPU_IB_POOL_DELAYED,
				     &job);
	if (r)
		goto out;

	amdgpu_res_first(abo->tbo.resource, offset, len, &src_mm);
	src_addr = amdgpu_ttm_domain_start(adev, bo->resource->mem_type) +
		src_mm.start;
	dst_addr = amdgpu_bo_gpu_offset(adev->mman.sdma_access_bo);
	if (write)
		swap(src_addr, dst_addr);

	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, dst_addr,
				PAGE_SIZE, false);

	amdgpu_ring_pad_ib(adev->mman.buffer_funcs_ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);

	fence = amdgpu_job_submit(job);

	if (!dma_fence_wait_timeout(fence, false, adev->sdma_timeout))
		r = -ETIMEDOUT;
	dma_fence_put(fence);

	if (!(r || write))
		memcpy(buf, adev->mman.sdma_access_ptr, len);
out:
	drm_dev_exit(idx);
	return r;
}

/**
 * amdgpu_ttm_access_memory - Read or Write memory that backs a buffer object.
 *
 * @bo: The buffer object to read/write
 * @offset: Offset into buffer object
 * @buf: Secondary buffer to write/read from
 * @len: Length in bytes of access
 * @write: true if writing
 *
 * This is used to access VRAM that backs a buffer object via MMIO
 * access for debugging purposes.
 */
static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
				    unsigned long offset, void *buf, int len,
				    int write)
{
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct amdgpu_res_cursor cursor;
	int ret = 0;

	if (bo->resource->mem_type != TTM_PL_VRAM)
		return -EIO;

	if (amdgpu_device_has_timeouts_enabled(adev) &&
	    !amdgpu_ttm_access_memory_sdma(bo, offset, buf, len, write))
		return len;

	amdgpu_res_first(bo->resource, offset, len, &cursor);
	while (cursor.remaining) {
		size_t count, size = cursor.size;
		loff_t pos = cursor.start;

		count = amdgpu_device_aper_access(adev, pos, buf, size, write);
		size -= count;
		if (size) {
			/* use MM to access the rest of VRAM and to handle unaligned addresses */
			pos += count;
			buf += count;
			amdgpu_ttm_vram_mm_access(adev, pos, buf, size, write);
		}

		ret += cursor.size;
		buf += cursor.size;
		amdgpu_res_next(&cursor, cursor.size);
	}

	return ret;
}

static void
amdgpu_bo_delete_mem_notify(struct ttm_buffer_object *bo)
{
	amdgpu_bo_move_notify(bo, false, NULL);
}

static struct ttm_device_funcs amdgpu_bo_driver = {
	.ttm_tt_create = &amdgpu_ttm_tt_create,
	.ttm_tt_populate = &amdgpu_ttm_tt_populate,
	.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
	.ttm_tt_destroy = &amdgpu_ttm_backend_destroy,
	.eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
	.evict_flags = &amdgpu_evict_flags,
	.move = &amdgpu_bo_move,
	.delete_mem_notify = &amdgpu_bo_delete_mem_notify,
	.release_notify = &amdgpu_bo_release_notify,
	.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
	.io_mem_pfn = amdgpu_ttm_io_mem_pfn,
	.access_memory = &amdgpu_ttm_access_memory,
};

/*
 * Firmware Reservation functions
 */
/**
 * amdgpu_ttm_fw_reserve_vram_fini - free fw reserved vram
 *
 * @adev: amdgpu_device pointer
 *
 * free fw reserved vram if it has been reserved.
 */
static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->mman.fw_vram_usage_reserved_bo,
		NULL, &adev->mman.fw_vram_usage_va);
}

/*
 * Driver Reservation functions
 */
/**
 * amdgpu_ttm_drv_reserve_vram_fini - free drv reserved vram
 *
 * @adev: amdgpu_device pointer
 *
 * free drv reserved vram if it has been reserved.
 */
static void amdgpu_ttm_drv_reserve_vram_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->mman.drv_vram_usage_reserved_bo,
						  NULL,
						  &adev->mman.drv_vram_usage_va);
}

/**
 * amdgpu_ttm_fw_reserve_vram_init - create bo vram reservation from fw
 *
 * @adev: amdgpu_device pointer
 *
 * create bo vram reservation from fw.
 */
static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
{
	uint64_t vram_size = adev->gmc.visible_vram_size;

	adev->mman.fw_vram_usage_va = NULL;
	adev->mman.fw_vram_usage_reserved_bo = NULL;

	if (adev->mman.fw_vram_usage_size == 0 ||
	    adev->mman.fw_vram_usage_size > vram_size)
		return 0;

	return amdgpu_bo_create_kernel_at(adev,
					  adev->mman.fw_vram_usage_start_offset,
					  adev->mman.fw_vram_usage_size,
					  &adev->mman.fw_vram_usage_reserved_bo,
					  &adev->mman.fw_vram_usage_va);
}

/**
 * amdgpu_ttm_drv_reserve_vram_init - create bo vram reservation from driver
 *
 * @adev: amdgpu_device pointer
 *
 * create bo vram reservation from drv.
 */
static int amdgpu_ttm_drv_reserve_vram_init(struct amdgpu_device *adev)
{
	u64 vram_size = adev->gmc.visible_vram_size;

	adev->mman.drv_vram_usage_va = NULL;
	adev->mman.drv_vram_usage_reserved_bo = NULL;

	if (adev->mman.drv_vram_usage_size == 0 ||
	    adev->mman.drv_vram_usage_size > vram_size)
		return 0;

	return amdgpu_bo_create_kernel_at(adev,
					  adev->mman.drv_vram_usage_start_offset,
					  adev->mman.drv_vram_usage_size,
					  &adev->mman.drv_vram_usage_reserved_bo,
					  &adev->mman.drv_vram_usage_va);
}

/*
 * Memory training reservation functions
 */

/**
 * amdgpu_ttm_training_reserve_vram_fini - free memory training reserved vram
 *
 * @adev: amdgpu_device pointer
 *
 * free memory training reserved vram if it has been reserved.
 */
static int amdgpu_ttm_training_reserve_vram_fini(struct amdgpu_device *adev)
{
	struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;

	ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
	amdgpu_bo_free_kernel(&ctx->c2p_bo, NULL, NULL);
	ctx->c2p_bo = NULL;

	return 0;
}

static void amdgpu_ttm_training_data_block_init(struct amdgpu_device *adev,
						uint32_t reserve_size)
{
	struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;

	memset(ctx, 0, sizeof(*ctx));

	ctx->c2p_train_data_offset =
		ALIGN((adev->gmc.mc_vram_size - reserve_size - SZ_1M), SZ_1M);
	ctx->p2c_train_data_offset =
		(adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET);
	ctx->train_data_size =
		GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES;

	DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
			ctx->train_data_size,
			ctx->p2c_train_data_offset,
			ctx->c2p_train_data_offset);
}

/*
 * reserve TMR memory at the top of VRAM which holds
 * IP Discovery data and is protected by PSP.
 */
static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
{
	struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
	bool mem_train_support = false;
	uint32_t reserve_size = 0;
	int ret;

	if (adev->bios && !amdgpu_sriov_vf(adev)) {
		if (amdgpu_atomfirmware_mem_training_supported(adev))
			mem_train_support = true;
		else
			DRM_DEBUG("memory training is not supported!\n");
	}

	/*
	 * Query reserved tmr size through atom firmwareinfo for Sienna_Cichlid and onwards for all
	 * the use cases (IP discovery/G6 memory training/profiling/diagnostic data.etc)
	 *
	 * Otherwise, fall back to the legacy approach to check and reserve tmr block for ip
	 * discovery data and G6 memory training data respectively
	 */
	if (adev->bios)
		reserve_size =
			amdgpu_atomfirmware_get_fw_reserved_fb_size(adev);

	if (!adev->bios && adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3))
		reserve_size = max(reserve_size, (uint32_t)280 << 20);
	else if (!reserve_size)
		reserve_size = DISCOVERY_TMR_OFFSET;

	if (mem_train_support) {
		/* reserve vram for mem train according to TMR location */
		amdgpu_ttm_training_data_block_init(adev, reserve_size);
		ret = amdgpu_bo_create_kernel_at(adev,
						 ctx->c2p_train_data_offset,
						 ctx->train_data_size,
						 &ctx->c2p_bo,
						 NULL);
		if (ret) {
			DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret);
			amdgpu_ttm_training_reserve_vram_fini(adev);
			return ret;
		}
		ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS;
	}

	if (!adev->gmc.is_app_apu) {
		ret = amdgpu_bo_create_kernel_at(
			adev, adev->gmc.real_vram_size - reserve_size,
			reserve_size, &adev->mman.fw_reserved_memory, NULL);
		if (ret) {
			DRM_ERROR("alloc tmr failed(%d)!\n", ret);
			amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory,
					      NULL, NULL);
			return ret;
		}
	} else {
		DRM_DEBUG_DRIVER("backdoor fw loading path for PSP TMR, no reservation needed\n");
	}

	return 0;
}

static int amdgpu_ttm_pools_init(struct amdgpu_device *adev)
{
	int i;

	if (!adev->gmc.is_app_apu || !adev->gmc.num_mem_partitions)
		return 0;

	adev->mman.ttm_pools = kcalloc(adev->gmc.num_mem_partitions,
				       sizeof(*adev->mman.ttm_pools),
				       GFP_KERNEL);
	if (!adev->mman.ttm_pools)
		return -ENOMEM;

	for (i = 0; i < adev->gmc.num_mem_partitions; i++) {
		ttm_pool_init(&adev->mman.ttm_pools[i], adev->dev,
			      adev->gmc.mem_partitions[i].numa.node,
			      false, false);
	}
	return 0;
}

static void amdgpu_ttm_pools_fini(struct amdgpu_device *adev)
{
	int i;

	if (!adev->gmc.is_app_apu || !adev->mman.ttm_pools)
		return;

	for (i = 0; i < adev->gmc.num_mem_partitions; i++)
		ttm_pool_fini(&adev->mman.ttm_pools[i]);

	kfree(adev->mman.ttm_pools);
	adev->mman.ttm_pools = NULL;
}

/*
 * amdgpu_ttm_init - Init the memory management (ttm) as well as various
 * gtt/vram related fields.
 *
 * This initializes all of the memory space pools that the TTM layer
 * will need such as the GTT space (system memory mapped to the device),
 * VRAM (on-board memory), and on-chip memories (GDS, GWS, OA) which
 * can be mapped per VMID.
 */
int amdgpu_ttm_init(struct amdgpu_device *adev)
{
	uint64_t gtt_size;
	int r;

	rw_init(&adev->mman.gtt_window_lock, "gttwin");

	/* No other users of the address space, so set it to 0 */
#ifdef notyet
	r = ttm_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,
			       adev_to_drm(adev)->anon_inode->i_mapping,
			       adev_to_drm(adev)->vma_offset_manager,
			       adev->need_swiotlb,
			       dma_addressing_limited(adev->dev));
#else
	r = ttm_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,
			       /*adev_to_drm(adev)->anon_inode->i_mapping*/NULL,
			       adev_to_drm(adev)->vma_offset_manager,
			       adev->need_swiotlb,
			       dma_addressing_limited(adev->dev));
#endif
	if (r) {
		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
		return r;
	}

	r = amdgpu_ttm_pools_init(adev);
	if (r) {
		DRM_ERROR("failed to init ttm pools(%d).\n", r);
		return r;
	}
	adev->mman.bdev.iot = adev->iot;
	adev->mman.bdev.memt = adev->memt;
	adev->mman.bdev.dmat = adev->dmat;
	adev->mman.initialized = true;

	/* Initialize VRAM pool with all of VRAM divided into pages */
	r = amdgpu_vram_mgr_init(adev);
	if (r) {
		DRM_ERROR("Failed initializing VRAM heap.\n");
		return r;
	}

	/* Change the size here instead of the init above so only lpfn is affected */
	amdgpu_ttm_set_buffer_funcs_status(adev, false);
#if defined(CONFIG_64BIT) && defined(__linux__)
#ifdef CONFIG_X86
	if (adev->gmc.xgmi.connected_to_cpu)
		adev->mman.aper_base_kaddr = ioremap_cache(adev->gmc.aper_base,
				adev->gmc.visible_vram_size);

	else if (adev->gmc.is_app_apu)
		DRM_DEBUG_DRIVER(
			"No need to ioremap when real vram size is 0\n");
	else
#endif
		adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
				adev->gmc.visible_vram_size);
#else
	if (bus_space_map(adev->memt, adev->gmc.aper_base,
	    adev->gmc.visible_vram_size,
	    BUS_SPACE_MAP_LINEAR | BUS_SPACE_MAP_PREFETCHABLE,
	    &adev->mman.aper_bsh)) {
		adev->mman.aper_base_kaddr = NULL;
	} else {
		adev->mman.aper_base_kaddr = bus_space_vaddr(adev->memt,
		    adev->mman.aper_bsh);
	}
#endif

	/*
	 * The reserved vram for firmware must be pinned to the specified
	 * place on the VRAM, so reserve it early.
	 */
	r = amdgpu_ttm_fw_reserve_vram_init(adev);
	if (r)
		return r;

	/*
	 * The reserved vram for driver must be pinned to the specified
	 * place on the VRAM, so reserve it early.
	 */
	r = amdgpu_ttm_drv_reserve_vram_init(adev);
	if (r)
		return r;

	/*
	 * Only NAVI10 and onwards ASICs support IP discovery.
	 * If IP discovery is enabled, a block of memory should be
	 * reserved for IP discovery.
	 */
	if (adev->mman.discovery_bin) {
		r = amdgpu_ttm_reserve_tmr(adev);
		if (r)
			return r;
	}

	/* allocate memory as required for VGA
	 * This is used for VGA emulation and pre-OS scanout buffers to
	 * avoid display artifacts while transitioning between pre-OS
	 * and driver.
	 */
	if (!adev->gmc.is_app_apu) {
		r = amdgpu_bo_create_kernel_at(adev, 0,
					       adev->mman.stolen_vga_size,
					       &adev->mman.stolen_vga_memory,
					       NULL);
		if (r)
			return r;

		r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_vga_size,
					       adev->mman.stolen_extended_size,
					       &adev->mman.stolen_extended_memory,
					       NULL);

		if (r)
			return r;

		r = amdgpu_bo_create_kernel_at(adev,
					       adev->mman.stolen_reserved_offset,
					       adev->mman.stolen_reserved_size,
					       &adev->mman.stolen_reserved_memory,
					       NULL);
		if (r)
			return r;
	} else {
		DRM_DEBUG_DRIVER("Skipped stolen memory reservation\n");
	}

	DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
		 (unsigned int)(adev->gmc.real_vram_size / (1024 * 1024)));

	/* Compute GTT size, either based on the TTM limit
	 * or whatever the user passed on module init.
	 */
	if (amdgpu_gtt_size == -1)
		gtt_size = ttm_tt_pages_limit() << PAGE_SHIFT;
	else
		gtt_size = (uint64_t)amdgpu_gtt_size << 20;

	/* Initialize GTT memory pool */
	r = amdgpu_gtt_mgr_init(adev, gtt_size);
	if (r) {
		DRM_ERROR("Failed initializing GTT heap.\n");
		return r;
	}
	DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
		 (unsigned int)(gtt_size / (1024 * 1024)));

	/* Initialize doorbell pool on PCI BAR */
	r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_DOORBELL, adev->doorbell.size / PAGE_SIZE);
	if (r) {
		DRM_ERROR("Failed initializing doorbell heap.\n");
		return r;
	}

	/* Create a doorbell page for kernel use */
	r = amdgpu_doorbell_create_kernel_doorbells(adev);
	if (r) {
		DRM_ERROR("Failed to initialize kernel doorbells.\n");
		return r;
	}

	/* Initialize preemptible memory pool */
	r = amdgpu_preempt_mgr_init(adev);
	if (r) {
		DRM_ERROR("Failed initializing PREEMPT heap.\n");
		return r;
	}

	/* Initialize various on-chip memory pools */
	r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GDS, adev->gds.gds_size);
	if (r) {
		DRM_ERROR("Failed initializing GDS heap.\n");
		return r;
	}

	r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GWS, adev->gds.gws_size);
	if (r) {
		DRM_ERROR("Failed initializing GWS heap.\n");
		return r;
	}

	r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_OA, adev->gds.oa_size);
	if (r) {
		DRM_ERROR("Failed initializing OA heap.\n");
		return r;
	}
	if (amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &adev->mman.sdma_access_bo, NULL,
				    &adev->mman.sdma_access_ptr))
		DRM_WARN("Debug VRAM access will use slowpath MM access\n");

	return 0;
}

/*
 * amdgpu_ttm_fini - De-initialize the TTM memory pools
 */
void amdgpu_ttm_fini(struct amdgpu_device *adev)
{
	int idx;

	if (!adev->mman.initialized)
		return;

	amdgpu_ttm_pools_fini(adev);

	amdgpu_ttm_training_reserve_vram_fini(adev);
	/* return the stolen vga memory back to VRAM */
	if (!adev->gmc.is_app_apu) {
		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
		amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
		/* return the FW reserved memory back to VRAM */
		amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL,
				      NULL);
		if (adev->mman.stolen_reserved_size)
			amdgpu_bo_free_kernel(&adev->mman.stolen_reserved_memory,
					      NULL, NULL);
	}
	amdgpu_bo_free_kernel(&adev->mman.sdma_access_bo, NULL,
			      &adev->mman.sdma_access_ptr);
	amdgpu_ttm_fw_reserve_vram_fini(adev);
	amdgpu_ttm_drv_reserve_vram_fini(adev);

	if (drm_dev_enter(adev_to_drm(adev), &idx)) {

#ifdef __linux__
		if (adev->mman.aper_base_kaddr)
			iounmap(adev->mman.aper_base_kaddr);
#else
		if (adev->mman.aper_base_kaddr)
			bus_space_unmap(adev->memt, adev->mman.aper_bsh,
			    adev->gmc.visible_vram_size);
#endif
		adev->mman.aper_base_kaddr = NULL;

		drm_dev_exit(idx);
	}

	amdgpu_vram_mgr_fini(adev);
	amdgpu_gtt_mgr_fini(adev);
	amdgpu_preempt_mgr_fini(adev);
	ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS);
	ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
	ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
	ttm_device_fini(&adev->mman.bdev);
	adev->mman.initialized = false;
	DRM_INFO("amdgpu: ttm finalized\n");
}

/**
 * amdgpu_ttm_set_buffer_funcs_status - enable/disable use of buffer functions
 *
 * @adev: amdgpu_device pointer
 * @enable: true when we can use buffer functions.
 *
 * Enable/disable use of buffer functions during suspend/resume. This should
 * only be called at bootup or when userspace isn't running.
 */
void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
{
	struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
	uint64_t size;
	int r;

	if (!adev->mman.initialized || amdgpu_in_reset(adev) ||
	    adev->mman.buffer_funcs_enabled == enable || adev->gmc.is_app_apu)
		return;

	if (enable) {
		struct amdgpu_ring *ring;
		struct drm_gpu_scheduler *sched;

		ring = adev->mman.buffer_funcs_ring;
		sched = &ring->sched;
		r = drm_sched_entity_init(&adev->mman.high_pr,
					  DRM_SCHED_PRIORITY_KERNEL, &sched,
					  1, NULL);
		if (r) {
			DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
				  r);
			return;
		}

		r = drm_sched_entity_init(&adev->mman.low_pr,
					  DRM_SCHED_PRIORITY_NORMAL, &sched,
					  1, NULL);
		if (r) {
			DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
				  r);
			goto error_free_entity;
		}
	} else {
		drm_sched_entity_destroy(&adev->mman.high_pr);
		drm_sched_entity_destroy(&adev->mman.low_pr);
		dma_fence_put(man->move);
		man->move = NULL;
	}

	/* This just adjusts TTM's idea of the VRAM size, which sets lpfn to the correct value */
	if (enable)
		size = adev->gmc.real_vram_size;
	else
		size = adev->gmc.visible_vram_size;
	man->size = size;
	adev->mman.buffer_funcs_enabled = enable;

	return;

error_free_entity:
	drm_sched_entity_destroy(&adev->mman.high_pr);
}

static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
				  bool direct_submit,
				  unsigned int num_dw,
				  struct dma_resv *resv,
				  bool vm_needs_flush,
				  struct amdgpu_job **job,
				  bool delayed)
{
	enum amdgpu_ib_pool_type pool = direct_submit ?
		AMDGPU_IB_POOL_DIRECT :
		AMDGPU_IB_POOL_DELAYED;
	int r;
	struct drm_sched_entity *entity = delayed ? &adev->mman.low_pr :
						    &adev->mman.high_pr;
	r = amdgpu_job_alloc_with_ib(adev, entity,
				     AMDGPU_FENCE_OWNER_UNDEFINED,
				     num_dw * 4, pool, job);
	if (r)
		return r;

	if (vm_needs_flush) {
		(*job)->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo ?
							adev->gmc.pdb0_bo :
							adev->gart.bo);
		(*job)->vm_needs_flush = true;
	}
	if (!resv)
		return 0;

	return drm_sched_job_add_resv_dependencies(&(*job)->base, resv,
						   DMA_RESV_USAGE_BOOKKEEP);
}

int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
		       uint64_t dst_offset, uint32_t byte_count,
		       struct dma_resv *resv,
		       struct dma_fence **fence, bool direct_submit,
		       bool vm_needs_flush, bool tmz)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned int num_loops, num_dw;
	struct amdgpu_job *job;
	uint32_t max_bytes;
	unsigned int i;
	int r;

	if (!direct_submit && !ring->sched.ready) {
		DRM_ERROR("Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

	max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
	num_loops = DIV_ROUND_UP(byte_count, max_bytes);
	num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
	r = amdgpu_ttm_prepare_job(adev, direct_submit, num_dw,
				   resv, vm_needs_flush, &job, false);
	if (r)
		return r;

	for (i = 0; i < num_loops; i++) {
		uint32_t cur_size_in_bytes = min(byte_count, max_bytes);

		amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
					dst_offset, cur_size_in_bytes, tmz);

		src_offset += cur_size_in_bytes;
		dst_offset += cur_size_in_bytes;
		byte_count -= cur_size_in_bytes;
	}

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);
	if (direct_submit)
		r = amdgpu_job_submit_direct(job, ring, fence);
	else
		*fence = amdgpu_job_submit(job);
	if (r)
		goto error_free;

	return r;

error_free:
	amdgpu_job_free(job);
	DRM_ERROR("Error scheduling IBs (%d)\n", r);
	return r;
}

static int amdgpu_ttm_fill_mem(struct amdgpu_ring *ring, uint32_t src_data,
			       uint64_t dst_addr, uint32_t byte_count,
			       struct dma_resv *resv,
			       struct dma_fence **fence,
			       bool vm_needs_flush, bool delayed)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned int num_loops, num_dw;
	struct amdgpu_job *job;
	uint32_t max_bytes;
	unsigned int i;
	int r;

	max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
	num_loops = DIV_ROUND_UP_ULL(byte_count, max_bytes);
	num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->fill_num_dw, 8);
	r = amdgpu_ttm_prepare_job(adev, false, num_dw, resv, vm_needs_flush,
				   &job, delayed);
	if (r)
		return r;

	for (i = 0; i < num_loops; i++) {
		uint32_t cur_size = min(byte_count, max_bytes);

		amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data, dst_addr,
					cur_size);

		dst_addr += cur_size;
		byte_count -= cur_size;
	}

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);
	*fence = amdgpu_job_submit(job);
	return 0;
}

int amdgpu_fill_buffer(struct amdgpu_bo *bo,
			uint32_t src_data,
			struct dma_resv *resv,
			struct dma_fence **f,
			bool delayed)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	struct dma_fence *fence = NULL;
	struct amdgpu_res_cursor dst;
	int r;

	if (!adev->mman.buffer_funcs_enabled) {
		DRM_ERROR("Trying to clear memory with ring turned off.\n");
		return -EINVAL;
	}

	amdgpu_res_first(bo->tbo.resource, 0,
			 amdgpu_bo_size(bo), &dst);

	mutex_lock(&adev->mman.gtt_window_lock);
	while (dst.remaining) {
		struct dma_fence *next;
		uint64_t cur_size, to;

		/* Never fill more than 256MiB at once to avoid timeouts */
		cur_size = min(dst.size, 256ULL << 20);

		r = amdgpu_ttm_map_buffer(&bo->tbo, bo->tbo.resource, &dst,
					  1, ring, false, &cur_size, &to);
		if (r)
			goto error;

		r = amdgpu_ttm_fill_mem(ring, src_data, to, cur_size, resv,
					&next, true, delayed);
		if (r)
			goto error;

		dma_fence_put(fence);
		fence = next;

		amdgpu_res_next(&dst, cur_size);
	}
error:
	mutex_unlock(&adev->mman.gtt_window_lock);
	if (f)
		*f = dma_fence_get(fence);
	dma_fence_put(fence);
	return r;
}

/**
 * amdgpu_ttm_evict_resources - evict memory buffers
 * @adev: amdgpu device object
 * @mem_type: evicted BO's memory type
 *
 * Evicts all @mem_type buffers on the lru list of the memory type.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_ttm_evict_resources(struct amdgpu_device *adev, int mem_type)
{
	struct ttm_resource_manager *man;

	switch (mem_type) {
	case TTM_PL_VRAM:
	case TTM_PL_TT:
	case AMDGPU_PL_GWS:
	case AMDGPU_PL_GDS:
	case AMDGPU_PL_OA:
		man = ttm_manager_type(&adev->mman.bdev, mem_type);
		break;
	default:
		DRM_ERROR("Trying to evict invalid memory type\n");
		return -EINVAL;
	}

	return ttm_resource_manager_evict_all(&adev->mman.bdev, man);
}

#if defined(CONFIG_DEBUG_FS)

static int amdgpu_ttm_page_pool_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = m->private;

	return ttm_pool_debugfs(&adev->mman.bdev.pool, m);
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_ttm_page_pool);

/*
 * amdgpu_ttm_vram_read - Linear read access to VRAM
 *
 * Accesses VRAM via MMIO for debugging purposes.
 */
static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
				    size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	if (*pos >= adev->gmc.mc_vram_size)
		return -ENXIO;

	size = min(size, (size_t)(adev->gmc.mc_vram_size - *pos));
	while (size) {
		size_t bytes = min(size, AMDGPU_TTM_VRAM_MAX_DW_READ * 4);
		uint32_t value[AMDGPU_TTM_VRAM_MAX_DW_READ];

		amdgpu_device_vram_access(adev, *pos, value, bytes, false);
		if (copy_to_user(buf, value, bytes))
			return -EFAULT;

		result += bytes;
		buf += bytes;
		*pos += bytes;
		size -= bytes;
	}

	return result;
}

/*
 * amdgpu_ttm_vram_write - Linear write access to VRAM
 *
 * Accesses VRAM via MMIO for debugging purposes.
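 *
 * As with the read path above, the offset and size must be dword (4 byte)
 * aligned; each dword is written through amdgpu_device_mm_access().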
 */
static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
				     size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	if (*pos >= adev->gmc.mc_vram_size)
		return -ENXIO;

	while (size) {
		uint32_t value;

		if (*pos >= adev->gmc.mc_vram_size)
			return result;

		r = get_user(value, (uint32_t *)buf);
		if (r)
			return r;

		amdgpu_device_mm_access(adev, *pos, &value, 4, true);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static const struct file_operations amdgpu_ttm_vram_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_ttm_vram_read,
	.write = amdgpu_ttm_vram_write,
	.llseek = default_llseek,
};

/*
 * amdgpu_iomem_read - Virtual read access to GPU mapped memory
 *
 * This function is used to read memory that has been mapped to the
 * GPU and the known addresses are not physical addresses but instead
 * bus addresses (e.g., what you'd put in an IB or ring buffer).
 */
static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
				 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	struct iommu_domain *dom;
	ssize_t result = 0;
	int r;

	/* retrieve the IOMMU domain if any for this device */
	dom = iommu_get_domain_for_dev(adev->dev);

	while (size) {
		phys_addr_t addr = *pos & LINUX_PAGE_MASK;
		loff_t off = *pos & ~LINUX_PAGE_MASK;
		size_t bytes = PAGE_SIZE - off;
		unsigned long pfn;
		struct vm_page *p;
		void *ptr;

		bytes = min(bytes, size);

		/* Translate the bus address to a physical address. If
		 * the domain is NULL it means there is no IOMMU active
		 * and the address translation is the identity
		 */
		addr = dom ? iommu_iova_to_phys(dom, addr) : addr;

		pfn = addr >> PAGE_SHIFT;
		if (!pfn_valid(pfn))
			return -EPERM;

		p = pfn_to_page(pfn);
#ifdef notyet
		if (p->mapping != adev->mman.bdev.dev_mapping)
			return -EPERM;
#else
		STUB();
#endif

		ptr = kmap_local_page(p);
		r = copy_to_user(buf, ptr + off, bytes);
		kunmap_local(ptr);
		if (r)
			return -EFAULT;

		size -= bytes;
		*pos += bytes;
		result += bytes;
	}

	return result;
}

/*
 * amdgpu_iomem_write - Virtual write access to GPU mapped memory
 *
 * This function is used to write memory that has been mapped to the
 * GPU and the known addresses are not physical addresses but instead
 * bus addresses (e.g., what you'd put in an IB or ring buffer).
 */
static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf,
				  size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	struct iommu_domain *dom;
	ssize_t result = 0;
	int r;

	dom = iommu_get_domain_for_dev(adev->dev);

	while (size) {
		phys_addr_t addr = *pos & LINUX_PAGE_MASK;
		loff_t off = *pos & ~LINUX_PAGE_MASK;
		size_t bytes = PAGE_SIZE - off;
		unsigned long pfn;
		struct vm_page *p;
		void *ptr;

		bytes = min(bytes, size);

		addr = dom ? iommu_iova_to_phys(dom, addr) : addr;

		pfn = addr >> PAGE_SHIFT;
		if (!pfn_valid(pfn))
			return -EPERM;

		p = pfn_to_page(pfn);
#ifdef notyet
		if (p->mapping != adev->mman.bdev.dev_mapping)
			return -EPERM;
#else
		STUB();
#endif

		ptr = kmap_local_page(p);
		r = copy_from_user(ptr + off, buf, bytes);
		kunmap_local(ptr);
		if (r)
			return -EFAULT;

		size -= bytes;
		*pos += bytes;
		result += bytes;
	}

	return result;
}

static const struct file_operations amdgpu_ttm_iomem_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_iomem_read,
	.write = amdgpu_iomem_write,
	.llseek = default_llseek
};

#endif

void amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;

	debugfs_create_file_size("amdgpu_vram", 0444, root, adev,
				 &amdgpu_ttm_vram_fops, adev->gmc.mc_vram_size);
	debugfs_create_file("amdgpu_iomem", 0444, root, adev,
			    &amdgpu_ttm_iomem_fops);
	debugfs_create_file("ttm_page_pool", 0444, root, adev,
			    &amdgpu_ttm_page_pool_fops);
	ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
							     TTM_PL_VRAM),
					    root, "amdgpu_vram_mm");
	ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
							     TTM_PL_TT),
					    root, "amdgpu_gtt_mm");
	ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
							     AMDGPU_PL_GDS),
					    root, "amdgpu_gds_mm");
	ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
							     AMDGPU_PL_GWS),
					    root, "amdgpu_gws_mm");
	ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
							     AMDGPU_PL_OA),
					    root, "amdgpu_oa_mm");

#endif
}