1/* $NetBSD: kfd_process.c,v 1.3 2021/12/18 23:44:59 riastradh Exp $ */ 2 3/* 4 * Copyright 2014 Advanced Micro Devices, Inc. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 22 * OTHER DEALINGS IN THE SOFTWARE. 23 */ 24 25#include <sys/cdefs.h> 26__KERNEL_RCSID(0, "$NetBSD: kfd_process.c,v 1.3 2021/12/18 23:44:59 riastradh Exp $"); 27 28#include <linux/mutex.h> 29#include <linux/log2.h> 30#include <linux/sched.h> 31#include <linux/sched/mm.h> 32#include <linux/sched/task.h> 33#include <linux/slab.h> 34#include <linux/amd-iommu.h> 35#include <linux/notifier.h> 36#include <linux/compat.h> 37#include <linux/mman.h> 38#include <linux/file.h> 39#include "amdgpu_amdkfd.h" 40#include "amdgpu.h" 41 42struct mm_struct; 43 44#include "kfd_priv.h" 45#include "kfd_device_queue_manager.h" 46#include "kfd_dbgmgr.h" 47#include "kfd_iommu.h" 48 49/* 50 * List of struct kfd_process (field kfd_process). 51 * Unique/indexed by mm_struct* 52 */ 53DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE); 54static DEFINE_MUTEX(kfd_processes_mutex); 55 56DEFINE_SRCU(kfd_processes_srcu); 57 58/* For process termination handling */ 59static struct workqueue_struct *kfd_process_wq; 60 61/* Ordered, single-threaded workqueue for restoring evicted 62 * processes. Restoring multiple processes concurrently under memory 63 * pressure can lead to processes blocking each other from validating 64 * their BOs and result in a live-lock situation where processes 65 * remain evicted indefinitely. 66 */ 67static struct workqueue_struct *kfd_restore_wq; 68 69static struct kfd_process *find_process(const struct task_struct *thread); 70static void kfd_process_ref_release(struct kref *ref); 71static struct kfd_process *create_process(const struct task_struct *thread); 72static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep); 73 74static void evict_process_worker(struct work_struct *work); 75static void restore_process_worker(struct work_struct *work); 76 77struct kfd_procfs_tree { 78 struct kobject *kobj; 79}; 80 81static struct kfd_procfs_tree procfs; 82 83static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr, 84 char *buffer) 85{ 86 int val = 0; 87 88 if (strcmp(attr->name, "pasid") == 0) { 89 struct kfd_process *p = container_of(attr, struct kfd_process, 90 attr_pasid); 91 val = p->pasid; 92 } else { 93 pr_err("Invalid attribute"); 94 return -EINVAL; 95 } 96 97 return snprintf(buffer, PAGE_SIZE, "%d\n", val); 98} 99 100static void kfd_procfs_kobj_release(struct kobject *kobj) 101{ 102 kfree(kobj); 103} 104 105static const struct sysfs_ops kfd_procfs_ops = { 106 .show = kfd_procfs_show, 107}; 108 109static struct kobj_type procfs_type = { 110 .release = kfd_procfs_kobj_release, 111 .sysfs_ops = &kfd_procfs_ops, 112}; 113 114void kfd_procfs_init(void) 115{ 116 int ret = 0; 117 118 procfs.kobj = kfd_alloc_struct(procfs.kobj); 119 if (!procfs.kobj) 120 return; 121 122 ret = kobject_init_and_add(procfs.kobj, &procfs_type, 123 &kfd_device->kobj, "proc"); 124 if (ret) { 125 pr_warn("Could not create procfs proc folder"); 126 /* If we fail to create the procfs, clean up */ 127 kfd_procfs_shutdown(); 128 } 129} 130 131void kfd_procfs_shutdown(void) 132{ 133 if (procfs.kobj) { 134 kobject_del(procfs.kobj); 135 kobject_put(procfs.kobj); 136 procfs.kobj = NULL; 137 } 138} 139 140int kfd_process_create_wq(void) 141{ 142 if (!kfd_process_wq) 143 kfd_process_wq = alloc_workqueue("kfd_process_wq", 0, 0); 144 if (!kfd_restore_wq) 145 kfd_restore_wq = alloc_ordered_workqueue("kfd_restore_wq", 0); 146 147 if (!kfd_process_wq || !kfd_restore_wq) { 148 kfd_process_destroy_wq(); 149 return -ENOMEM; 150 } 151 152 return 0; 153} 154 155void kfd_process_destroy_wq(void) 156{ 157 if (kfd_process_wq) { 158 destroy_workqueue(kfd_process_wq); 159 kfd_process_wq = NULL; 160 } 161 if (kfd_restore_wq) { 162 destroy_workqueue(kfd_restore_wq); 163 kfd_restore_wq = NULL; 164 } 165} 166 167static void kfd_process_free_gpuvm(struct kgd_mem *mem, 168 struct kfd_process_device *pdd) 169{ 170 struct kfd_dev *dev = pdd->dev; 171 172 amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->kgd, mem, pdd->vm); 173 amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, mem); 174} 175 176/* kfd_process_alloc_gpuvm - Allocate GPU VM for the KFD process 177 * This function should be only called right after the process 178 * is created and when kfd_processes_mutex is still being held 179 * to avoid concurrency. Because of that exclusiveness, we do 180 * not need to take p->mutex. 181 */ 182static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd, 183 uint64_t gpu_va, uint32_t size, 184 uint32_t flags, void **kptr) 185{ 186 struct kfd_dev *kdev = pdd->dev; 187 struct kgd_mem *mem = NULL; 188 int handle; 189 int err; 190 191 err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->kgd, gpu_va, size, 192 pdd->vm, &mem, NULL, flags); 193 if (err) 194 goto err_alloc_mem; 195 196 err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, mem, pdd->vm); 197 if (err) 198 goto err_map_mem; 199 200 err = amdgpu_amdkfd_gpuvm_sync_memory(kdev->kgd, mem, true); 201 if (err) { 202 pr_debug("Sync memory failed, wait interrupted by user signal\n"); 203 goto sync_memory_failed; 204 } 205 206 /* Create an obj handle so kfd_process_device_remove_obj_handle 207 * will take care of the bo removal when the process finishes. 208 * We do not need to take p->mutex, because the process is just 209 * created and the ioctls have not had the chance to run. 210 */ 211 handle = kfd_process_device_create_obj_handle(pdd, mem); 212 213 if (handle < 0) { 214 err = handle; 215 goto free_gpuvm; 216 } 217 218 if (kptr) { 219 err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kdev->kgd, 220 (struct kgd_mem *)mem, kptr, NULL); 221 if (err) { 222 pr_debug("Map GTT BO to kernel failed\n"); 223 goto free_obj_handle; 224 } 225 } 226 227 return err; 228 229free_obj_handle: 230 kfd_process_device_remove_obj_handle(pdd, handle); 231free_gpuvm: 232sync_memory_failed: 233 kfd_process_free_gpuvm(mem, pdd); 234 return err; 235 236err_map_mem: 237 amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->kgd, mem); 238err_alloc_mem: 239 *kptr = NULL; 240 return err; 241} 242 243/* kfd_process_device_reserve_ib_mem - Reserve memory inside the 244 * process for IB usage The memory reserved is for KFD to submit 245 * IB to AMDGPU from kernel. If the memory is reserved 246 * successfully, ib_kaddr will have the CPU/kernel 247 * address. Check ib_kaddr before accessing the memory. 248 */ 249static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd) 250{ 251 struct qcm_process_device *qpd = &pdd->qpd; 252 uint32_t flags = ALLOC_MEM_FLAGS_GTT | 253 ALLOC_MEM_FLAGS_NO_SUBSTITUTE | 254 ALLOC_MEM_FLAGS_WRITABLE | 255 ALLOC_MEM_FLAGS_EXECUTABLE; 256 void *kaddr; 257 int ret; 258 259 if (qpd->ib_kaddr || !qpd->ib_base) 260 return 0; 261 262 /* ib_base is only set for dGPU */ 263 ret = kfd_process_alloc_gpuvm(pdd, qpd->ib_base, PAGE_SIZE, flags, 264 &kaddr); 265 if (ret) 266 return ret; 267 268 qpd->ib_kaddr = kaddr; 269 270 return 0; 271} 272 273struct kfd_process *kfd_create_process(struct file *filep) 274{ 275 struct kfd_process *process; 276 struct task_struct *thread = current; 277 int ret; 278 279 if (!thread->mm) 280 return ERR_PTR(-EINVAL); 281 282 /* Only the pthreads threading model is supported. */ 283 if (thread->group_leader->mm != thread->mm) 284 return ERR_PTR(-EINVAL); 285 286 /* 287 * take kfd processes mutex before starting of process creation 288 * so there won't be a case where two threads of the same process 289 * create two kfd_process structures 290 */ 291 mutex_lock(&kfd_processes_mutex); 292 293 /* A prior open of /dev/kfd could have already created the process. */ 294 process = find_process(thread); 295 if (process) { 296 pr_debug("Process already found\n"); 297 } else { 298 process = create_process(thread); 299 if (IS_ERR(process)) 300 goto out; 301 302 ret = kfd_process_init_cwsr_apu(process, filep); 303 if (ret) { 304 process = ERR_PTR(ret); 305 goto out; 306 } 307 308 if (!procfs.kobj) 309 goto out; 310 311 process->kobj = kfd_alloc_struct(process->kobj); 312 if (!process->kobj) { 313 pr_warn("Creating procfs kobject failed"); 314 goto out; 315 } 316 ret = kobject_init_and_add(process->kobj, &procfs_type, 317 procfs.kobj, "%d", 318 (int)process->lead_thread->pid); 319 if (ret) { 320 pr_warn("Creating procfs pid directory failed"); 321 goto out; 322 } 323 324 process->attr_pasid.name = "pasid"; 325 process->attr_pasid.mode = KFD_SYSFS_FILE_MODE; 326 sysfs_attr_init(&process->attr_pasid); 327 ret = sysfs_create_file(process->kobj, &process->attr_pasid); 328 if (ret) 329 pr_warn("Creating pasid for pid %d failed", 330 (int)process->lead_thread->pid); 331 } 332out: 333 if (!IS_ERR(process)) 334 kref_get(&process->ref); 335 mutex_unlock(&kfd_processes_mutex); 336 337 return process; 338} 339 340struct kfd_process *kfd_get_process(const struct task_struct *thread) 341{ 342 struct kfd_process *process; 343 344 if (!thread->mm) 345 return ERR_PTR(-EINVAL); 346 347 /* Only the pthreads threading model is supported. */ 348 if (thread->group_leader->mm != thread->mm) 349 return ERR_PTR(-EINVAL); 350 351 process = find_process(thread); 352 if (!process) 353 return ERR_PTR(-EINVAL); 354 355 return process; 356} 357 358static struct kfd_process *find_process_by_mm(const struct mm_struct *mm) 359{ 360 struct kfd_process *process; 361 362 hash_for_each_possible_rcu(kfd_processes_table, process, 363 kfd_processes, (uintptr_t)mm) 364 if (process->mm == mm) 365 return process; 366 367 return NULL; 368} 369 370static struct kfd_process *find_process(const struct task_struct *thread) 371{ 372 struct kfd_process *p; 373 int idx; 374 375 idx = srcu_read_lock(&kfd_processes_srcu); 376 p = find_process_by_mm(thread->mm); 377 srcu_read_unlock(&kfd_processes_srcu, idx); 378 379 return p; 380} 381 382void kfd_unref_process(struct kfd_process *p) 383{ 384 kref_put(&p->ref, kfd_process_ref_release); 385} 386 387static void kfd_process_device_free_bos(struct kfd_process_device *pdd) 388{ 389 struct kfd_process *p = pdd->process; 390 void *mem; 391 int id; 392 393 /* 394 * Remove all handles from idr and release appropriate 395 * local memory object 396 */ 397 idr_for_each_entry(&pdd->alloc_idr, mem, id) { 398 struct kfd_process_device *peer_pdd; 399 400 list_for_each_entry(peer_pdd, &p->per_device_data, 401 per_device_list) { 402 if (!peer_pdd->vm) 403 continue; 404 amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu( 405 peer_pdd->dev->kgd, mem, peer_pdd->vm); 406 } 407 408 amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->kgd, mem); 409 kfd_process_device_remove_obj_handle(pdd, id); 410 } 411} 412 413static void kfd_process_free_outstanding_kfd_bos(struct kfd_process *p) 414{ 415 struct kfd_process_device *pdd; 416 417 list_for_each_entry(pdd, &p->per_device_data, per_device_list) 418 kfd_process_device_free_bos(pdd); 419} 420 421static void kfd_process_destroy_pdds(struct kfd_process *p) 422{ 423 struct kfd_process_device *pdd, *temp; 424 425 list_for_each_entry_safe(pdd, temp, &p->per_device_data, 426 per_device_list) { 427 pr_debug("Releasing pdd (topology id %d) for process (pasid 0x%x)\n", 428 pdd->dev->id, p->pasid); 429 430 if (pdd->drm_file) { 431 amdgpu_amdkfd_gpuvm_release_process_vm( 432 pdd->dev->kgd, pdd->vm); 433 fput(pdd->drm_file); 434 } 435 else if (pdd->vm) 436 amdgpu_amdkfd_gpuvm_destroy_process_vm( 437 pdd->dev->kgd, pdd->vm); 438 439 list_del(&pdd->per_device_list); 440 441 if (pdd->qpd.cwsr_kaddr && !pdd->qpd.cwsr_base) 442 free_pages((unsigned long)pdd->qpd.cwsr_kaddr, 443 get_order(KFD_CWSR_TBA_TMA_SIZE)); 444 445 kfree(pdd->qpd.doorbell_bitmap); 446 idr_destroy(&pdd->alloc_idr); 447 448 kfree(pdd); 449 } 450} 451 452/* No process locking is needed in this function, because the process 453 * is not findable any more. We must assume that no other thread is 454 * using it any more, otherwise we couldn't safely free the process 455 * structure in the end. 456 */ 457static void kfd_process_wq_release(struct work_struct *work) 458{ 459 struct kfd_process *p = container_of(work, struct kfd_process, 460 release_work); 461 462 /* Remove the procfs files */ 463 if (p->kobj) { 464 sysfs_remove_file(p->kobj, &p->attr_pasid); 465 kobject_del(p->kobj); 466 kobject_put(p->kobj); 467 p->kobj = NULL; 468 } 469 470 kfd_iommu_unbind_process(p); 471 472 kfd_process_free_outstanding_kfd_bos(p); 473 474 kfd_process_destroy_pdds(p); 475 dma_fence_put(p->ef); 476 477 kfd_event_free_process(p); 478 479 kfd_pasid_free(p->pasid); 480 kfd_free_process_doorbells(p); 481 482 mutex_destroy(&p->mutex); 483 484 put_task_struct(p->lead_thread); 485 486 kfree(p); 487} 488 489static void kfd_process_ref_release(struct kref *ref) 490{ 491 struct kfd_process *p = container_of(ref, struct kfd_process, ref); 492 493 INIT_WORK(&p->release_work, kfd_process_wq_release); 494 queue_work(kfd_process_wq, &p->release_work); 495} 496 497static void kfd_process_free_notifier(struct mmu_notifier *mn) 498{ 499 kfd_unref_process(container_of(mn, struct kfd_process, mmu_notifier)); 500} 501 502static void kfd_process_notifier_release(struct mmu_notifier *mn, 503 struct mm_struct *mm) 504{ 505 struct kfd_process *p; 506 struct kfd_process_device *pdd = NULL; 507 508 /* 509 * The kfd_process structure can not be free because the 510 * mmu_notifier srcu is read locked 511 */ 512 p = container_of(mn, struct kfd_process, mmu_notifier); 513 if (WARN_ON(p->mm != mm)) 514 return; 515 516 mutex_lock(&kfd_processes_mutex); 517 hash_del_rcu(&p->kfd_processes); 518 mutex_unlock(&kfd_processes_mutex); 519 synchronize_srcu(&kfd_processes_srcu); 520 521 cancel_delayed_work_sync(&p->eviction_work); 522 cancel_delayed_work_sync(&p->restore_work); 523 524 mutex_lock(&p->mutex); 525 526 /* Iterate over all process device data structures and if the 527 * pdd is in debug mode, we should first force unregistration, 528 * then we will be able to destroy the queues 529 */ 530 list_for_each_entry(pdd, &p->per_device_data, per_device_list) { 531 struct kfd_dev *dev = pdd->dev; 532 533 mutex_lock(kfd_get_dbgmgr_mutex()); 534 if (dev && dev->dbgmgr && dev->dbgmgr->pasid == p->pasid) { 535 if (!kfd_dbgmgr_unregister(dev->dbgmgr, p)) { 536 kfd_dbgmgr_destroy(dev->dbgmgr); 537 dev->dbgmgr = NULL; 538 } 539 } 540 mutex_unlock(kfd_get_dbgmgr_mutex()); 541 } 542 543 kfd_process_dequeue_from_all_devices(p); 544 pqm_uninit(&p->pqm); 545 546 /* Indicate to other users that MM is no longer valid */ 547 p->mm = NULL; 548 549 mutex_unlock(&p->mutex); 550 551 mmu_notifier_put(&p->mmu_notifier); 552} 553 554static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = { 555 .release = kfd_process_notifier_release, 556 .free_notifier = kfd_process_free_notifier, 557}; 558 559static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep) 560{ 561 unsigned long offset; 562 struct kfd_process_device *pdd; 563 564 list_for_each_entry(pdd, &p->per_device_data, per_device_list) { 565 struct kfd_dev *dev = pdd->dev; 566 struct qcm_process_device *qpd = &pdd->qpd; 567 568 if (!dev->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base) 569 continue; 570 571 offset = KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id); 572 qpd->tba_addr = (int64_t)vm_mmap(filep, 0, 573 KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC, 574 MAP_SHARED, offset); 575 576 if (IS_ERR_VALUE(qpd->tba_addr)) { 577 int err = qpd->tba_addr; 578 579 pr_err("Failure to set tba address. error %d.\n", err); 580 qpd->tba_addr = 0; 581 qpd->cwsr_kaddr = NULL; 582 return err; 583 } 584 585 memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size); 586 587 qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET; 588 pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n", 589 qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr); 590 } 591 592 return 0; 593} 594 595static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd) 596{ 597 struct kfd_dev *dev = pdd->dev; 598 struct qcm_process_device *qpd = &pdd->qpd; 599 uint32_t flags = ALLOC_MEM_FLAGS_GTT | 600 ALLOC_MEM_FLAGS_NO_SUBSTITUTE | ALLOC_MEM_FLAGS_EXECUTABLE; 601 void *kaddr; 602 int ret; 603 604 if (!dev->cwsr_enabled || qpd->cwsr_kaddr || !qpd->cwsr_base) 605 return 0; 606 607 /* cwsr_base is only set for dGPU */ 608 ret = kfd_process_alloc_gpuvm(pdd, qpd->cwsr_base, 609 KFD_CWSR_TBA_TMA_SIZE, flags, &kaddr); 610 if (ret) 611 return ret; 612 613 qpd->cwsr_kaddr = kaddr; 614 qpd->tba_addr = qpd->cwsr_base; 615 616 memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size); 617 618 qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET; 619 pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n", 620 qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr); 621 622 return 0; 623} 624 625/* 626 * On return the kfd_process is fully operational and will be freed when the 627 * mm is released 628 */ 629static struct kfd_process *create_process(const struct task_struct *thread) 630{ 631 struct kfd_process *process; 632 int err = -ENOMEM; 633 634 process = kzalloc(sizeof(*process), GFP_KERNEL); 635 if (!process) 636 goto err_alloc_process; 637 638 kref_init(&process->ref); 639 mutex_init(&process->mutex); 640 process->mm = thread->mm; 641 process->lead_thread = thread->group_leader; 642 INIT_LIST_HEAD(&process->per_device_data); 643 INIT_DELAYED_WORK(&process->eviction_work, evict_process_worker); 644 INIT_DELAYED_WORK(&process->restore_work, restore_process_worker); 645 process->last_restore_timestamp = get_jiffies_64(); 646 kfd_event_init_process(process); 647 process->is_32bit_user_mode = in_compat_syscall(); 648 649 process->pasid = kfd_pasid_alloc(); 650 if (process->pasid == 0) 651 goto err_alloc_pasid; 652 653 if (kfd_alloc_process_doorbells(process) < 0) 654 goto err_alloc_doorbells; 655 656 err = pqm_init(&process->pqm, process); 657 if (err != 0) 658 goto err_process_pqm_init; 659 660 /* init process apertures*/ 661 err = kfd_init_apertures(process); 662 if (err != 0) 663 goto err_init_apertures; 664 665 /* Must be last, have to use release destruction after this */ 666 process->mmu_notifier.ops = &kfd_process_mmu_notifier_ops; 667 err = mmu_notifier_register(&process->mmu_notifier, process->mm); 668 if (err) 669 goto err_register_notifier; 670 671 get_task_struct(process->lead_thread); 672 hash_add_rcu(kfd_processes_table, &process->kfd_processes, 673 (uintptr_t)process->mm); 674 675 return process; 676 677err_register_notifier: 678 kfd_process_free_outstanding_kfd_bos(process); 679 kfd_process_destroy_pdds(process); 680err_init_apertures: 681 pqm_uninit(&process->pqm); 682err_process_pqm_init: 683 kfd_free_process_doorbells(process); 684err_alloc_doorbells: 685 kfd_pasid_free(process->pasid); 686err_alloc_pasid: 687 mutex_destroy(&process->mutex); 688 kfree(process); 689err_alloc_process: 690 return ERR_PTR(err); 691} 692 693static int init_doorbell_bitmap(struct qcm_process_device *qpd, 694 struct kfd_dev *dev) 695{ 696 unsigned int i; 697 int range_start = dev->shared_resources.non_cp_doorbells_start; 698 int range_end = dev->shared_resources.non_cp_doorbells_end; 699 700 if (!KFD_IS_SOC15(dev->device_info->asic_family)) 701 return 0; 702 703 qpd->doorbell_bitmap = 704 kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS, 705 BITS_PER_BYTE), GFP_KERNEL); 706 if (!qpd->doorbell_bitmap) 707 return -ENOMEM; 708 709 /* Mask out doorbells reserved for SDMA, IH, and VCN on SOC15. */ 710 pr_debug("reserved doorbell 0x%03x - 0x%03x\n", range_start, range_end); 711 pr_debug("reserved doorbell 0x%03x - 0x%03x\n", 712 range_start + KFD_QUEUE_DOORBELL_MIRROR_OFFSET, 713 range_end + KFD_QUEUE_DOORBELL_MIRROR_OFFSET); 714 715 for (i = 0; i < KFD_MAX_NUM_OF_QUEUES_PER_PROCESS / 2; i++) { 716 if (i >= range_start && i <= range_end) { 717 set_bit(i, qpd->doorbell_bitmap); 718 set_bit(i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET, 719 qpd->doorbell_bitmap); 720 } 721 } 722 723 return 0; 724} 725 726struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev, 727 struct kfd_process *p) 728{ 729 struct kfd_process_device *pdd = NULL; 730 731 list_for_each_entry(pdd, &p->per_device_data, per_device_list) 732 if (pdd->dev == dev) 733 return pdd; 734 735 return NULL; 736} 737 738struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev, 739 struct kfd_process *p) 740{ 741 struct kfd_process_device *pdd = NULL; 742 743 pdd = kzalloc(sizeof(*pdd), GFP_KERNEL); 744 if (!pdd) 745 return NULL; 746 747 if (init_doorbell_bitmap(&pdd->qpd, dev)) { 748 pr_err("Failed to init doorbell for process\n"); 749 kfree(pdd); 750 return NULL; 751 } 752 753 pdd->dev = dev; 754 INIT_LIST_HEAD(&pdd->qpd.queues_list); 755 INIT_LIST_HEAD(&pdd->qpd.priv_queue_list); 756 pdd->qpd.dqm = dev->dqm; 757 pdd->qpd.pqm = &p->pqm; 758 pdd->qpd.evicted = 0; 759 pdd->process = p; 760 pdd->bound = PDD_UNBOUND; 761 pdd->already_dequeued = false; 762 list_add(&pdd->per_device_list, &p->per_device_data); 763 764 /* Init idr used for memory handle translation */ 765 idr_init(&pdd->alloc_idr); 766 767 return pdd; 768} 769 770/** 771 * kfd_process_device_init_vm - Initialize a VM for a process-device 772 * 773 * @pdd: The process-device 774 * @drm_file: Optional pointer to a DRM file descriptor 775 * 776 * If @drm_file is specified, it will be used to acquire the VM from 777 * that file descriptor. If successful, the @pdd takes ownership of 778 * the file descriptor. 779 * 780 * If @drm_file is NULL, a new VM is created. 781 * 782 * Returns 0 on success, -errno on failure. 783 */ 784int kfd_process_device_init_vm(struct kfd_process_device *pdd, 785 struct file *drm_file) 786{ 787 struct kfd_process *p; 788 struct kfd_dev *dev; 789 int ret; 790 791 if (pdd->vm) 792 return drm_file ? -EBUSY : 0; 793 794 p = pdd->process; 795 dev = pdd->dev; 796 797 if (drm_file) 798 ret = amdgpu_amdkfd_gpuvm_acquire_process_vm( 799 dev->kgd, drm_file, p->pasid, 800 &pdd->vm, &p->kgd_process_info, &p->ef); 801 else 802 ret = amdgpu_amdkfd_gpuvm_create_process_vm(dev->kgd, p->pasid, 803 &pdd->vm, &p->kgd_process_info, &p->ef); 804 if (ret) { 805 pr_err("Failed to create process VM object\n"); 806 return ret; 807 } 808 809 amdgpu_vm_set_task_info(pdd->vm); 810 811 ret = kfd_process_device_reserve_ib_mem(pdd); 812 if (ret) 813 goto err_reserve_ib_mem; 814 ret = kfd_process_device_init_cwsr_dgpu(pdd); 815 if (ret) 816 goto err_init_cwsr; 817 818 pdd->drm_file = drm_file; 819 820 return 0; 821 822err_init_cwsr: 823err_reserve_ib_mem: 824 kfd_process_device_free_bos(pdd); 825 if (!drm_file) 826 amdgpu_amdkfd_gpuvm_destroy_process_vm(dev->kgd, pdd->vm); 827 pdd->vm = NULL; 828 829 return ret; 830} 831 832/* 833 * Direct the IOMMU to bind the process (specifically the pasid->mm) 834 * to the device. 835 * Unbinding occurs when the process dies or the device is removed. 836 * 837 * Assumes that the process lock is held. 838 */ 839struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev, 840 struct kfd_process *p) 841{ 842 struct kfd_process_device *pdd; 843 int err; 844 845 pdd = kfd_get_process_device_data(dev, p); 846 if (!pdd) { 847 pr_err("Process device data doesn't exist\n"); 848 return ERR_PTR(-ENOMEM); 849 } 850 851 err = kfd_iommu_bind_process_to_device(pdd); 852 if (err) 853 return ERR_PTR(err); 854 855 err = kfd_process_device_init_vm(pdd, NULL); 856 if (err) 857 return ERR_PTR(err); 858 859 return pdd; 860} 861 862struct kfd_process_device *kfd_get_first_process_device_data( 863 struct kfd_process *p) 864{ 865 return list_first_entry(&p->per_device_data, 866 struct kfd_process_device, 867 per_device_list); 868} 869 870struct kfd_process_device *kfd_get_next_process_device_data( 871 struct kfd_process *p, 872 struct kfd_process_device *pdd) 873{ 874 if (list_is_last(&pdd->per_device_list, &p->per_device_data)) 875 return NULL; 876 return list_next_entry(pdd, per_device_list); 877} 878 879bool kfd_has_process_device_data(struct kfd_process *p) 880{ 881 return !(list_empty(&p->per_device_data)); 882} 883 884/* Create specific handle mapped to mem from process local memory idr 885 * Assumes that the process lock is held. 886 */ 887int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd, 888 void *mem) 889{ 890 return idr_alloc(&pdd->alloc_idr, mem, 0, 0, GFP_KERNEL); 891} 892 893/* Translate specific handle from process local memory idr 894 * Assumes that the process lock is held. 895 */ 896void *kfd_process_device_translate_handle(struct kfd_process_device *pdd, 897 int handle) 898{ 899 if (handle < 0) 900 return NULL; 901 902 return idr_find(&pdd->alloc_idr, handle); 903} 904 905/* Remove specific handle from process local memory idr 906 * Assumes that the process lock is held. 907 */ 908void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd, 909 int handle) 910{ 911 if (handle >= 0) 912 idr_remove(&pdd->alloc_idr, handle); 913} 914 915/* This increments the process->ref counter. */ 916struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid) 917{ 918 struct kfd_process *p, *ret_p = NULL; 919 unsigned int temp; 920 921 int idx = srcu_read_lock(&kfd_processes_srcu); 922 923 hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) { 924 if (p->pasid == pasid) { 925 kref_get(&p->ref); 926 ret_p = p; 927 break; 928 } 929 } 930 931 srcu_read_unlock(&kfd_processes_srcu, idx); 932 933 return ret_p; 934} 935 936/* This increments the process->ref counter. */ 937struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm) 938{ 939 struct kfd_process *p; 940 941 int idx = srcu_read_lock(&kfd_processes_srcu); 942 943 p = find_process_by_mm(mm); 944 if (p) 945 kref_get(&p->ref); 946 947 srcu_read_unlock(&kfd_processes_srcu, idx); 948 949 return p; 950} 951 952/* process_evict_queues - Evict all user queues of a process 953 * 954 * Eviction is reference-counted per process-device. This means multiple 955 * evictions from different sources can be nested safely. 956 */ 957int kfd_process_evict_queues(struct kfd_process *p) 958{ 959 struct kfd_process_device *pdd; 960 int r = 0; 961 unsigned int n_evicted = 0; 962 963 list_for_each_entry(pdd, &p->per_device_data, per_device_list) { 964 r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm, 965 &pdd->qpd); 966 if (r) { 967 pr_err("Failed to evict process queues\n"); 968 goto fail; 969 } 970 n_evicted++; 971 } 972 973 return r; 974 975fail: 976 /* To keep state consistent, roll back partial eviction by 977 * restoring queues 978 */ 979 list_for_each_entry(pdd, &p->per_device_data, per_device_list) { 980 if (n_evicted == 0) 981 break; 982 if (pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm, 983 &pdd->qpd)) 984 pr_err("Failed to restore queues\n"); 985 986 n_evicted--; 987 } 988 989 return r; 990} 991 992/* process_restore_queues - Restore all user queues of a process */ 993int kfd_process_restore_queues(struct kfd_process *p) 994{ 995 struct kfd_process_device *pdd; 996 int r, ret = 0; 997 998 list_for_each_entry(pdd, &p->per_device_data, per_device_list) { 999 r = pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm, 1000 &pdd->qpd); 1001 if (r) { 1002 pr_err("Failed to restore process queues\n"); 1003 if (!ret) 1004 ret = r; 1005 } 1006 } 1007 1008 return ret; 1009} 1010 1011static void evict_process_worker(struct work_struct *work) 1012{ 1013 int ret; 1014 struct kfd_process *p; 1015 struct delayed_work *dwork; 1016 1017 dwork = to_delayed_work(work); 1018 1019 /* Process termination destroys this worker thread. So during the 1020 * lifetime of this thread, kfd_process p will be valid 1021 */ 1022 p = container_of(dwork, struct kfd_process, eviction_work); 1023 WARN_ONCE(p->last_eviction_seqno != p->ef->seqno, 1024 "Eviction fence mismatch\n"); 1025 1026 /* Narrow window of overlap between restore and evict work 1027 * item is possible. Once amdgpu_amdkfd_gpuvm_restore_process_bos 1028 * unreserves KFD BOs, it is possible to evicted again. But 1029 * restore has few more steps of finish. So lets wait for any 1030 * previous restore work to complete 1031 */ 1032 flush_delayed_work(&p->restore_work); 1033 1034 pr_debug("Started evicting pasid 0x%x\n", p->pasid); 1035 ret = kfd_process_evict_queues(p); 1036 if (!ret) { 1037 dma_fence_signal(p->ef); 1038 dma_fence_put(p->ef); 1039 p->ef = NULL; 1040 queue_delayed_work(kfd_restore_wq, &p->restore_work, 1041 msecs_to_jiffies(PROCESS_RESTORE_TIME_MS)); 1042 1043 pr_debug("Finished evicting pasid 0x%x\n", p->pasid); 1044 } else 1045 pr_err("Failed to evict queues of pasid 0x%x\n", p->pasid); 1046} 1047 1048static void restore_process_worker(struct work_struct *work) 1049{ 1050 struct delayed_work *dwork; 1051 struct kfd_process *p; 1052 int ret = 0; 1053 1054 dwork = to_delayed_work(work); 1055 1056 /* Process termination destroys this worker thread. So during the 1057 * lifetime of this thread, kfd_process p will be valid 1058 */ 1059 p = container_of(dwork, struct kfd_process, restore_work); 1060 pr_debug("Started restoring pasid 0x%x\n", p->pasid); 1061 1062 /* Setting last_restore_timestamp before successful restoration. 1063 * Otherwise this would have to be set by KGD (restore_process_bos) 1064 * before KFD BOs are unreserved. If not, the process can be evicted 1065 * again before the timestamp is set. 1066 * If restore fails, the timestamp will be set again in the next 1067 * attempt. This would mean that the minimum GPU quanta would be 1068 * PROCESS_ACTIVE_TIME_MS - (time to execute the following two 1069 * functions) 1070 */ 1071 1072 p->last_restore_timestamp = get_jiffies_64(); 1073 ret = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info, 1074 &p->ef); 1075 if (ret) { 1076 pr_debug("Failed to restore BOs of pasid 0x%x, retry after %d ms\n", 1077 p->pasid, PROCESS_BACK_OFF_TIME_MS); 1078 ret = queue_delayed_work(kfd_restore_wq, &p->restore_work, 1079 msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS)); 1080 WARN(!ret, "reschedule restore work failed\n"); 1081 return; 1082 } 1083 1084 ret = kfd_process_restore_queues(p); 1085 if (!ret) 1086 pr_debug("Finished restoring pasid 0x%x\n", p->pasid); 1087 else 1088 pr_err("Failed to restore queues of pasid 0x%x\n", p->pasid); 1089} 1090 1091void kfd_suspend_all_processes(void) 1092{ 1093 struct kfd_process *p; 1094 unsigned int temp; 1095 int idx = srcu_read_lock(&kfd_processes_srcu); 1096 1097 hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) { 1098 cancel_delayed_work_sync(&p->eviction_work); 1099 cancel_delayed_work_sync(&p->restore_work); 1100 1101 if (kfd_process_evict_queues(p)) 1102 pr_err("Failed to suspend process 0x%x\n", p->pasid); 1103 dma_fence_signal(p->ef); 1104 dma_fence_put(p->ef); 1105 p->ef = NULL; 1106 } 1107 srcu_read_unlock(&kfd_processes_srcu, idx); 1108} 1109 1110int kfd_resume_all_processes(void) 1111{ 1112 struct kfd_process *p; 1113 unsigned int temp; 1114 int ret = 0, idx = srcu_read_lock(&kfd_processes_srcu); 1115 1116 hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) { 1117 if (!queue_delayed_work(kfd_restore_wq, &p->restore_work, 0)) { 1118 pr_err("Restore process %d failed during resume\n", 1119 p->pasid); 1120 ret = -EFAULT; 1121 } 1122 } 1123 srcu_read_unlock(&kfd_processes_srcu, idx); 1124 return ret; 1125} 1126 1127int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process, 1128 struct vm_area_struct *vma) 1129{ 1130 struct kfd_process_device *pdd; 1131 struct qcm_process_device *qpd; 1132 1133 if ((vma->vm_end - vma->vm_start) != KFD_CWSR_TBA_TMA_SIZE) { 1134 pr_err("Incorrect CWSR mapping size.\n"); 1135 return -EINVAL; 1136 } 1137 1138 pdd = kfd_get_process_device_data(dev, process); 1139 if (!pdd) 1140 return -EINVAL; 1141 qpd = &pdd->qpd; 1142 1143 qpd->cwsr_kaddr = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1144 get_order(KFD_CWSR_TBA_TMA_SIZE)); 1145 if (!qpd->cwsr_kaddr) { 1146 pr_err("Error allocating per process CWSR buffer.\n"); 1147 return -ENOMEM; 1148 } 1149 1150 vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND 1151 | VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP; 1152 /* Mapping pages to user process */ 1153 return remap_pfn_range(vma, vma->vm_start, 1154 PFN_DOWN(__pa(qpd->cwsr_kaddr)), 1155 KFD_CWSR_TBA_TMA_SIZE, vma->vm_page_prot); 1156} 1157 1158void kfd_flush_tlb(struct kfd_process_device *pdd) 1159{ 1160 struct kfd_dev *dev = pdd->dev; 1161 1162 if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) { 1163 /* Nothing to flush until a VMID is assigned, which 1164 * only happens when the first queue is created. 1165 */ 1166 if (pdd->qpd.vmid) 1167 amdgpu_amdkfd_flush_gpu_tlb_vmid(dev->kgd, 1168 pdd->qpd.vmid); 1169 } else { 1170 amdgpu_amdkfd_flush_gpu_tlb_pasid(dev->kgd, 1171 pdd->process->pasid); 1172 } 1173} 1174 1175#if defined(CONFIG_DEBUG_FS) 1176 1177int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data) 1178{ 1179 struct kfd_process *p; 1180 unsigned int temp; 1181 int r = 0; 1182 1183 int idx = srcu_read_lock(&kfd_processes_srcu); 1184 1185 hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) { 1186 seq_printf(m, "Process %d PASID 0x%x:\n", 1187 p->lead_thread->tgid, p->pasid); 1188 1189 mutex_lock(&p->mutex); 1190 r = pqm_debugfs_mqds(m, &p->pqm); 1191 mutex_unlock(&p->mutex); 1192 1193 if (r) 1194 break; 1195 } 1196 1197 srcu_read_unlock(&kfd_processes_srcu, idx); 1198 1199 return r; 1200} 1201 1202#endif 1203 1204