/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * based on nouveau_prime.c
 *
 * Authors: Alex Deucher
 */

/**
 * DOC: PRIME Buffer Sharing
 *
 * The following callback implementations are used for :ref:`sharing GEM buffer
 * objects between different devices via PRIME <prime_buffer_sharing>`.
 */

#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_gem.h"
#include "amdgpu_dma_buf.h"
#include "amdgpu_xgmi.h"
#include <drm/amdgpu_drm.h>
#include <drm/ttm/ttm_tt.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence-array.h>
#include <linux/pci-p2pdma.h>
#include <linux/pm_runtime.h>

/**
 * amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation
 *
 * @dmabuf: DMA-buf where we attach to
 * @attach: attachment to add
 *
 * Add the attachment as user to the exported DMA-buf.
 */
static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
				 struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dmabuf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	int r;

#ifdef notyet
	if (pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0)
		attach->peer2peer = false;
#endif

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0)
		goto out;

	return 0;

out:
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
	return r;
}

#ifdef notyet

/**
 * amdgpu_dma_buf_detach - &dma_buf_ops.detach implementation
 *
 * @dmabuf: DMA-buf where we remove the attachment from
 * @attach: the attachment to remove
 *
 * Called when an attachment is removed from the DMA-buf.
 */
static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf,
				  struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dmabuf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
}

/**
 * amdgpu_dma_buf_pin - &dma_buf_ops.pin implementation
 *
 * @attach: attachment to pin down
 *
 * Pin the BO which is backing the DMA-buf so that it can't move any more.
 */
static int amdgpu_dma_buf_pin(struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	/* pin buffer into GTT */
	return amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
}

/**
 * amdgpu_dma_buf_unpin - &dma_buf_ops.unpin implementation
 *
 * @attach: attachment to unpin
 *
 * Unpin a previously pinned BO to make it movable again.
 */
static void amdgpu_dma_buf_unpin(struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	amdgpu_bo_unpin(bo);
}

/**
 * amdgpu_dma_buf_map - &dma_buf_ops.map_dma_buf implementation
 * @attach: DMA-buf attachment
 * @dir: DMA direction
 *
 * Makes sure that the shared DMA buffer can be accessed by the target device.
 * For now, simply pins it to the GTT domain, where it should be accessible by
 * all DMA devices.
 *
 * Returns:
 * sg_table filled with the DMA addresses to use or ERR_PTR with a negative
 * error code.
 */
static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
					   enum dma_data_direction dir)
{
	struct dma_buf *dma_buf = attach->dmabuf;
	struct drm_gem_object *obj = dma_buf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct sg_table *sgt;
	long r;

	if (!bo->tbo.pin_count) {
		/* move buffer into GTT or VRAM */
		struct ttm_operation_ctx ctx = { false, false };
		unsigned int domains = AMDGPU_GEM_DOMAIN_GTT;

		if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM &&
		    attach->peer2peer) {
			bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
			domains |= AMDGPU_GEM_DOMAIN_VRAM;
		}
		amdgpu_bo_placement_from_domain(bo, domains);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		if (r)
			return ERR_PTR(r);

	} else if (!(amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type) &
		     AMDGPU_GEM_DOMAIN_GTT)) {
		return ERR_PTR(-EBUSY);
	}

	switch (bo->tbo.resource->mem_type) {
	case TTM_PL_TT:
		sgt = drm_prime_pages_to_sg(obj->dev,
					    bo->tbo.ttm->pages,
					    bo->tbo.ttm->num_pages);
		if (IS_ERR(sgt))
			return sgt;

		if (dma_map_sgtable(attach->dev, sgt, dir,
				    DMA_ATTR_SKIP_CPU_SYNC))
			goto error_free;
		break;

	case TTM_PL_VRAM:
		r = amdgpu_vram_mgr_alloc_sgt(adev, bo->tbo.resource, 0,
					      bo->tbo.base.size, attach->dev,
					      dir, &sgt);
		if (r)
			return ERR_PTR(r);
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	return sgt;

error_free:
	sg_free_table(sgt);
	kfree(sgt);
	return ERR_PTR(-EBUSY);
}

/**
 * amdgpu_dma_buf_unmap - &dma_buf_ops.unmap_dma_buf implementation
 * @attach: DMA-buf attachment
 * @sgt: sg_table to unmap
 * @dir: DMA direction
 *
 * This is called when a shared DMA buffer no longer needs to be accessible by
 * another device. For now, simply unpins the buffer from GTT.
 */
static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach,
				 struct sg_table *sgt,
				 enum dma_data_direction dir)
{
	/* sg_tables with struct page backing come from the GTT mapping path,
	 * while sg_tables carrying only DMA addresses were built by the VRAM
	 * manager and must be freed through it.
	 */
	if (sgt->sgl->page_link) {
		dma_unmap_sgtable(attach->dev, sgt, dir, 0);
		sg_free_table(sgt);
		kfree(sgt);
	} else {
		amdgpu_vram_mgr_free_sgt(attach->dev, dir, sgt);
	}
}

/**
 * amdgpu_dma_buf_begin_cpu_access - &dma_buf_ops.begin_cpu_access implementation
 * @dma_buf: Shared DMA buffer
 * @direction: Direction of DMA transfer
 *
 * This is called before CPU access to the shared DMA buffer's memory. If it's
 * a read access, the buffer is moved to the GTT domain if possible, for optimal
 * CPU read performance.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
					   enum dma_data_direction direction)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { true, false };
	u32 domain = amdgpu_display_supported_domains(adev, bo->flags);
	int ret;
	bool reads = (direction == DMA_BIDIRECTIONAL ||
		      direction == DMA_FROM_DEVICE);

	if (!reads || !(domain & AMDGPU_GEM_DOMAIN_GTT))
		return 0;

	/* move to gtt */
	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret != 0))
		return ret;

	if (!bo->tbo.pin_count &&
	    (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	}

	amdgpu_bo_unreserve(bo);
	return ret;
}

#endif /* notyet */

const struct dma_buf_ops amdgpu_dmabuf_ops = {
#ifdef notyet
	.attach = amdgpu_dma_buf_attach,
	.detach = amdgpu_dma_buf_detach,
	.pin = amdgpu_dma_buf_pin,
	.unpin = amdgpu_dma_buf_unpin,
	.map_dma_buf = amdgpu_dma_buf_map,
	.unmap_dma_buf = amdgpu_dma_buf_unmap,
#endif
	.release = drm_gem_dmabuf_release,
#ifdef notyet
	.begin_cpu_access = amdgpu_dma_buf_begin_cpu_access,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
#endif
};

/**
 * amdgpu_gem_prime_export - &drm_driver.gem_prime_export implementation
 * @gobj: GEM BO
 * @flags: Flags such as DRM_CLOEXEC and DRM_RDWR.
 *
 * The main work is done by the &drm_gem_prime_export helper.
 *
 * Returns:
 * Shared DMA buffer representing the GEM BO from the given device.
 */
struct dma_buf *amdgpu_gem_prime_export(struct drm_gem_object *gobj,
					int flags)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
	struct dma_buf *buf;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
	    bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
		return ERR_PTR(-EPERM);

	buf = drm_gem_prime_export(gobj, flags);
	if (!IS_ERR(buf))
		buf->ops = &amdgpu_dmabuf_ops;

	return buf;
}

/**
 * amdgpu_dma_buf_create_obj - create BO for DMA-buf import
 *
 * @dev: DRM device
 * @dma_buf: DMA-buf
 *
 * Creates an empty SG BO for DMA-buf import.
 *
 * Returns:
 * A new GEM BO of the given DRM device, representing the memory
 * described by the given DMA-buf attachment and scatter/gather table.
 */
static struct drm_gem_object *
amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf)
{
	struct dma_resv *resv = dma_buf->resv;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_gem_object *gobj;
	struct amdgpu_bo *bo;
	uint64_t flags = 0;
	int ret;

	dma_resv_lock(resv, NULL);

	if (dma_buf->ops == &amdgpu_dmabuf_ops) {
		struct amdgpu_bo *other = gem_to_amdgpu_bo(dma_buf->priv);

		flags |= other->flags & (AMDGPU_GEM_CREATE_CPU_GTT_USWC |
					 AMDGPU_GEM_CREATE_COHERENT |
					 AMDGPU_GEM_CREATE_UNCACHED);
	}

	ret = amdgpu_gem_object_create(adev, dma_buf->size, PAGE_SIZE,
				       AMDGPU_GEM_DOMAIN_CPU, flags,
				       ttm_bo_type_sg, resv, &gobj, 0);
	if (ret)
		goto error;

	bo = gem_to_amdgpu_bo(gobj);
	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;

	dma_resv_unlock(resv);
	return gobj;

error:
	dma_resv_unlock(resv);
	return ERR_PTR(ret);
}

/**
 * amdgpu_dma_buf_move_notify - &attach.move_notify implementation
 *
 * @attach: the DMA-buf attachment
 *
 * Invalidate the DMA-buf attachment, making sure that we re-create the
 * mapping before the next use.
 */
static void
amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->importer_priv;
	struct ww_acquire_ctx *ticket = dma_resv_locking_ctx(obj->resv);
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { false, false };
	struct ttm_placement placement = {};
	struct amdgpu_vm_bo_base *bo_base;
	int r;

	if (!bo->tbo.resource || bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
		return;

	r = ttm_bo_validate(&bo->tbo, &placement, &ctx);
	if (r) {
		DRM_ERROR("Failed to invalidate DMA-buf import (%d)\n", r);
		return;
	}

	for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
		struct amdgpu_vm *vm = bo_base->vm;
		struct dma_resv *resv = vm->root.bo->tbo.base.resv;

		if (ticket) {
			/* When we get an error here it means that somebody
			 * else is holding the VM lock and updating page tables,
			 * so we can just continue here.
			 */
			r = dma_resv_lock(resv, ticket);
			if (r)
				continue;

		} else {
			/* TODO: This is more problematic and we actually need
			 * to allow page table updates without holding the
			 * lock.
			 */
			if (!dma_resv_trylock(resv))
				continue;
		}

		/* Reserve fences for two SDMA page table updates */
		r = dma_resv_reserve_fences(resv, 2);
		if (!r)
			r = amdgpu_vm_clear_freed(adev, vm, NULL);
		if (!r)
			r = amdgpu_vm_handle_moved(adev, vm);

		if (r && r != -EBUSY)
			DRM_ERROR("Failed to invalidate VM page tables (%d)\n",
				  r);

		dma_resv_unlock(resv);
	}
}

static const struct dma_buf_attach_ops amdgpu_dma_buf_attach_ops = {
	.allow_peer2peer = true,
	.move_notify = amdgpu_dma_buf_move_notify
};

/**
 * amdgpu_gem_prime_import - &drm_driver.gem_prime_import implementation
 * @dev: DRM device
 * @dma_buf: Shared DMA buffer
 *
 * Import a dma_buf into the driver and potentially create a new GEM object.
 *
 * Returns:
 * GEM BO representing the shared DMA buffer for the given device.
 */
struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
					       struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_gem_object *obj;

	if (dma_buf->ops == &amdgpu_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem
			 * increases the refcount on the gem itself instead of
			 * the f_count of the dmabuf.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	obj = amdgpu_dma_buf_create_obj(dev, dma_buf);
	if (IS_ERR(obj))
		return obj;

	STUB();
#ifdef notyet
	attach = dma_buf_dynamic_attach(dma_buf, dev->dev,
					&amdgpu_dma_buf_attach_ops, obj);
	if (IS_ERR(attach)) {
		drm_gem_object_put(obj);
		return ERR_CAST(attach);
	}
#else
	attach = NULL;
#endif

	get_dma_buf(dma_buf);
	obj->import_attach = attach;
	return obj;
}

/**
 * amdgpu_dmabuf_is_xgmi_accessible - Check if XGMI is available for P2P transfer
 *
 * @adev: amdgpu_device pointer of the importer
 * @bo: amdgpu buffer object
 *
 * Returns:
 * True if the DMA-buf is accessible over XGMI, false otherwise.
 */
bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev,
				      struct amdgpu_bo *bo)
{
	struct drm_gem_object *obj = &bo->tbo.base;
	struct drm_gem_object *gobj;

	if (obj->import_attach) {
#ifdef notyet
		struct dma_buf *dma_buf = obj->import_attach->dmabuf;

		if (dma_buf->ops != &amdgpu_dmabuf_ops)
			/* No XGMI with non AMD GPUs */
			return false;

		gobj = dma_buf->priv;
		bo = gem_to_amdgpu_bo(gobj);
#else
		return false;
#endif
	}

	if (amdgpu_xgmi_same_hive(adev, amdgpu_ttm_adev(bo->tbo.bdev)) &&
	    (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM))
		return true;

	return false;
}