Lines Matching defs:svms

53 #define dynamic_svm_range_dump(svms) \
54 _dynamic_func_call_no_desc("svm_range_dump", svm_range_debug_dump, svms)
56 #define dynamic_svm_range_dump(svms) \
57 do { if (0) svm_range_debug_dump(svms); } while (0)
87 * Remove the svm_range from the svms and svm_bo lists and the svms
90 * Context: The caller must hold svms->lock
94 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
105 interval_tree_remove(&prange->it_node, &prange->svms->objects);
111 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
121 * svm_range_add_to_svms - add svm range to svms
124 * Add the svm range to svms interval tree and link list
126 * Context: The caller must hold svms->lock
130 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
133 list_move_tail(&prange->list, &prange->svms->list);
136 interval_tree_insert(&prange->it_node, &prange->svms->objects);
141 pr_debug("remove notifier svms 0x%p prange 0x%p [0x%lx 0x%lx]\n",
142 prange->svms, prange,
214 p = container_of(prange->svms, struct kfd_process, svms);
261 p = container_of(prange->svms, struct kfd_process, svms);
282 struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
285 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms, prange,
323 svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start,
334 p = container_of(svms, struct kfd_process, svms);
343 prange->svms = svms;
358 bitmap_copy(prange->bitmap_access, svms->bitmap_supported,
365 pr_debug("svms 0x%p [0x%llx 0x%llx]\n", svms, start, last);
396 pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
493 pr_debug("reuse old bo svms 0x%p [0x%lx 0x%lx]\n",
494 prange->svms, prange->start, prange->last);
542 p = container_of(prange->svms, struct kfd_process, svms);
543 pr_debug("pasid: %x svms 0x%p [0x%lx 0x%lx]\n", p->pasid, prange->svms,
656 p = container_of(prange->svms, struct kfd_process, svms);
671 p = container_of(prange->svms, struct kfd_process, svms);
725 !test_bit(gpuidx, p->svms.bitmap_supported)) {
842 * svm_range_debug_dump - print all range information from svms
843 * @svms: svm range list header
845 * debug output svm range start, end, prefetch location from svms
848 * Context: The caller must hold svms->lock
850 static void svm_range_debug_dump(struct svm_range_list *svms)
855 pr_debug("dump svms 0x%p list\n", svms);
858 list_for_each_entry(prange, &svms->list, list) {
865 pr_debug("dump svms 0x%p interval tree\n", svms);
867 node = interval_tree_iter_first(&svms->objects, 0, ~0ULL);
983 pr_debug("svms 0x%p new prange 0x%p start 0x%lx [0x%llx 0x%llx]\n",
984 new->svms, new, new->start, start, last);
1024 pr_debug("svms 0x%p new 0x%lx old [0x%lx 0x%lx] => [0x%llx 0x%llx]\n",
1025 new->svms, new->start, old->start, old->last, start, last);
1084 struct svm_range_list *svms;
1087 pr_debug("svms 0x%p [0x%llx 0x%llx] to [0x%llx 0x%llx]\n", prange->svms,
1095 svms = prange->svms;
1097 *new = svm_range_new(svms, last + 1, old_last, false);
1099 *new = svm_range_new(svms, old_start, start - 1, false);
1303 pr_debug("unmap svms 0x%p prange 0x%p\n", prange->svms, prange);
1309 p = container_of(prange->svms, struct kfd_process, svms);
1357 pr_debug("svms 0x%p [0x%lx 0x%lx] readonly %d\n", prange->svms,
1378 pr_debug("svms 0x%p map [0x%lx 0x%llx] vram %d PTE 0x%llx\n",
1379 prange->svms, last_start, prange->start + i,
1433 p = container_of(prange->svms, struct kfd_process, svms);
1589 ctx->process = container_of(prange->svms, struct kfd_process, svms);
1648 p = container_of(prange->svms, struct kfd_process, svms);
1676 WRITE_ONCE(p->svms.faulting_task, current);
1680 WRITE_ONCE(p->svms.faulting_task, NULL);
1741 * @svms: the svm range list
1748 svm_range_list_lock_and_flush_work(struct svm_range_list *svms,
1752 flush_work(&svms->deferred_list_work);
1755 if (list_empty(&svms->deferred_range_list))
1766 struct svm_range_list *svms;
1774 svms = container_of(dwork, struct svm_range_list, restore_work);
1775 evicted_ranges = atomic_read(&svms->evicted_ranges);
1781 p = container_of(svms, struct kfd_process, svms);
1787 pr_debug("svms 0x%p process mm gone\n", svms);
1792 svm_range_list_lock_and_flush_work(svms, mm);
1793 mutex_lock(&svms->lock);
1795 evicted_ranges = atomic_read(&svms->evicted_ranges);
1797 list_for_each_entry(prange, &svms->list, list) {
1802 pr_debug("restoring svms 0x%p prange 0x%p [0x%lx %lx] inv %d\n",
1803 prange->svms, prange, prange->start, prange->last,
1825 if (atomic_cmpxchg(&svms->evicted_ranges, evicted_ranges, 0) !=
1842 mutex_unlock(&svms->lock);
1849 queue_delayed_work(system_freezable_wq, &svms->restore_work,
1878 struct svm_range_list *svms = prange->svms;
1883 p = container_of(svms, struct kfd_process, svms);
1885 pr_debug("invalidate svms 0x%p prange [0x%lx 0x%lx] [0x%lx 0x%lx]\n",
1886 svms, prange->start, prange->last, start, last);
1912 evicted_ranges = atomic_inc_return(&svms->evicted_ranges);
1916 pr_debug("evicting svms 0x%p range [0x%lx 0x%lx]\n",
1917 prange->svms, prange->start, prange->last);
1924 pr_debug("schedule to restore svm %p ranges\n", svms);
1925 queue_delayed_work(system_freezable_wq, &svms->restore_work,
1936 pr_debug("invalidate unmap svms 0x%p [0x%lx 0x%lx] from GPUs\n",
1937 prange->svms, start, last);
1959 new = svm_range_new(old->svms, old->start, old->last, false);
2012 svm_range_split_new(struct svm_range_list *svms, uint64_t start, uint64_t last,
2025 prange = svm_range_new(svms, start, l, true);
2038 * @p: the range add to this process svms
2044 * @insert_list: output, the ranges need insert to svms
2045 * @remove_list: output, the ranges are replaced and need remove from svms
2061 * Context: Process context, caller must hold svms->lock
2073 struct svm_range_list *svms = &p->svms;
2080 pr_debug("svms 0x%p [0x%llx 0x%lx]\n", &p->svms, start, last);
2088 node = interval_tree_iter_first(&svms->objects, start, last);
2143 r = svm_range_split_new(svms, start, node->start - 1,
2156 r = svm_range_split_new(svms, start, last,
2187 prange->svms, prange, start, last, prange->start,
2191 interval_tree_remove(&prange->it_node, &prange->svms->objects);
2197 interval_tree_insert(&prange->it_node, &prange->svms->objects);
2202 svm_range_handle_list_op(struct svm_range_list *svms, struct svm_range *prange,
2208 svms, prange, prange->start, prange->last);
2212 svms, prange, prange->start, prange->last);
2219 svms, prange, prange->start, prange->last);
2224 svms, prange, prange->start, prange->last);
2229 pr_debug("add 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms, prange,
2235 pr_debug("add and map 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms,
2247 static void svm_range_drain_retry_fault(struct svm_range_list *svms)
2254 p = container_of(svms, struct kfd_process, svms);
2257 drain = atomic_read(&svms->drain_pagefaults);
2261 for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) {
2266 pr_debug("drain retry fault gpu %d svms %p\n", i, svms);
2278 pr_debug("drain retry fault gpu %d svms 0x%p done\n", i, svms);
2280 if (atomic_cmpxchg(&svms->drain_pagefaults, drain, 0) != drain)
2286 struct svm_range_list *svms;
2290 svms = container_of(work, struct svm_range_list, deferred_list_work);
2291 pr_debug("enter svms 0x%p\n", svms);
2293 spin_lock(&svms->deferred_list_lock);
2294 while (!list_empty(&svms->deferred_range_list)) {
2295 prange = list_first_entry(&svms->deferred_range_list,
2297 spin_unlock(&svms->deferred_list_lock);
2309 if (unlikely(atomic_read(&svms->drain_pagefaults))) {
2311 svm_range_drain_retry_fault(svms);
2323 spin_lock(&svms->deferred_list_lock);
2325 spin_unlock(&svms->deferred_list_lock);
2327 mutex_lock(&svms->lock);
2337 svm_range_handle_list_op(svms, pchild, mm);
2341 svm_range_handle_list_op(svms, prange, mm);
2342 mutex_unlock(&svms->lock);
2350 spin_lock(&svms->deferred_list_lock);
2352 spin_unlock(&svms->deferred_list_lock);
2353 pr_debug("exit svms 0x%p\n", svms);
2357 svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange,
2360 spin_lock(&svms->deferred_list_lock);
2375 &prange->svms->deferred_range_list);
2379 spin_unlock(&svms->deferred_list_lock);
2382 void schedule_deferred_list_work(struct svm_range_list *svms)
2384 spin_lock(&svms->deferred_list_lock);
2385 if (!list_empty(&svms->deferred_range_list))
2386 schedule_work(&svms->deferred_list_work);
2387 spin_unlock(&svms->deferred_list_lock);
2429 struct svm_range_list *svms;
2438 svms = &p->svms;
2440 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n", svms,
2447 atomic_inc(&svms->drain_pagefaults);
2467 svm_range_add_list_work(svms, prange, mm, SVM_OP_UNMAP_RANGE);
2469 svm_range_add_list_work(svms, prange, mm,
2471 schedule_deferred_list_work(svms);
2485 * For unmap event, unmap range from GPUs, remove prange from svms in a delayed
2542 * @svms: svm range list header
2546 * Context: The caller must hold svms->lock
2551 svm_range_from_addr(struct svm_range_list *svms, unsigned long addr,
2558 node = interval_tree_iter_first(&svms->objects, addr, addr);
2614 p = container_of(prange->svms, struct kfd_process, svms);
2675 node = interval_tree_iter_first(&p->svms.objects, addr + 1, ULONG_MAX);
2684 rb_node = rb_last(&p->svms.objects.rb_root);
2787 prange = svm_range_new(&p->svms, start, last, true);
2821 struct svm_range_list *svms = prange->svms;
2823 spin_lock(&svms->deferred_list_lock);
2826 spin_unlock(&svms->deferred_list_lock);
2829 spin_unlock(&svms->deferred_list_lock);
2832 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] unmapped\n",
2833 svms, prange, prange->start, prange->last);
2838 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] not added yet\n",
2839 svms, prange, prange->start, prange->last);
2892 struct svm_range_list *svms;
2914 svms = &p->svms;
2916 pr_debug("restoring svms 0x%p fault address 0x%llx\n", svms, addr);
2918 if (atomic_read(&svms->drain_pagefaults)) {
2935 pr_debug("svms 0x%p failed to get mm\n", svms);
2949 mutex_lock(&svms->lock);
2950 prange = svm_range_from_addr(svms, addr, NULL);
2952 pr_debug("failed to find prange svms 0x%p address [0x%llx]\n",
2953 svms, addr);
2959 mutex_unlock(&svms->lock);
2967 pr_debug("failed to create unregistered range svms 0x%p address [0x%llx]\n",
2968 svms, addr);
2988 pr_debug("svms 0x%p [0x%lx %lx] already restored\n",
2989 svms, prange->start, prange->last);
3013 pr_debug("svms %p failed get best restore loc [0x%lx 0x%lx]\n",
3014 svms, prange->start, prange->last);
3019 pr_debug("svms %p [0x%lx 0x%lx] best restore 0x%x, actual loc 0x%x\n",
3020 svms, prange->start, prange->last, best_loc,
3053 pr_debug("failed %d to migrate svms %p [0x%lx 0x%lx]\n",
3054 r, svms, start, last);
3062 pr_debug("failed %d to map svms 0x%p [0x%lx 0x%lx] to gpus\n",
3063 r, svms, start, last);
3071 mutex_unlock(&svms->lock);
3098 mutex_lock(&p->svms.lock);
3100 list_for_each_entry(prange, &p->svms.list, list) {
3137 /* Change xnack mode must be inside svms lock, to avoid race with
3142 mutex_unlock(&p->svms.lock);
3151 pr_debug("pasid 0x%x svms 0x%p\n", p->pasid, &p->svms);
3153 cancel_delayed_work_sync(&p->svms.restore_work);
3156 flush_work(&p->svms.deferred_list_work);
3162 atomic_inc(&p->svms.drain_pagefaults);
3163 svm_range_drain_retry_fault(&p->svms);
3165 list_for_each_entry_safe(prange, next, &p->svms.list, list) {
3171 mutex_destroy(&p->svms.lock);
3173 pr_debug("pasid 0x%x svms 0x%p done\n", p->pasid, &p->svms);
3178 struct svm_range_list *svms = &p->svms;
3181 svms->objects = RB_ROOT_CACHED;
3182 mutex_init(&svms->lock);
3183 INIT_LIST_HEAD(&svms->list);
3184 atomic_set(&svms->evicted_ranges, 0);
3185 atomic_set(&svms->drain_pagefaults, 0);
3186 INIT_DELAYED_WORK(&svms->restore_work, svm_range_restore_work);
3187 INIT_WORK(&svms->deferred_list_work, svm_range_deferred_list_work);
3188 INIT_LIST_HEAD(&svms->deferred_range_list);
3189 INIT_LIST_HEAD(&svms->criu_svm_metadata_list);
3190 spin_lock_init(&svms->deferred_list_lock);
3194 bitmap_set(svms->bitmap_supported, i, 1);
3328 p = container_of(prange->svms, struct kfd_process, svms);
3368 pr_debug("xnack %d svms 0x%p [0x%lx 0x%lx] best loc 0x%x\n",
3369 p->xnack_enabled, &p->svms, prange->start, prange->last,
3475 pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
3523 struct svm_range_list *svms;
3530 pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] pages 0x%llx\n",
3531 p->pasid, &p->svms, start, start + size - 1, size);
3537 svms = &p->svms;
3541 svm_range_list_lock_and_flush_work(svms, mm);
3550 mutex_lock(&svms->lock);
3556 mutex_unlock(&svms->lock);
3571 prange->svms, prange, prange->start,
3632 dynamic_svm_range_dump(svms);
3634 mutex_unlock(&svms->lock);
3639 pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] done, r=%d\n", p->pasid,
3640 &p->svms, start, start + size - 1, r);
3660 struct svm_range_list *svms;
3670 pr_debug("svms 0x%p [0x%llx 0x%llx] nattr 0x%x\n", &p->svms, start,
3679 flush_work(&p->svms.deferred_list_work);
3716 svms = &p->svms;
3718 mutex_lock(&svms->lock);
3720 node = interval_tree_iter_first(&svms->objects, start, last);
3727 bitmap_copy(bitmap_access, svms->bitmap_supported,
3734 bitmap_copy(bitmap_access, svms->bitmap_supported, MAX_GPU_INSTANCE);
3735 bitmap_copy(bitmap_aip, svms->bitmap_supported, MAX_GPU_INSTANCE);
3782 mutex_unlock(&svms->lock);
3827 struct svm_range_list *svms = &p->svms;
3834 if (list_empty(&svms->criu_svm_metadata_list)) {
3848 list_for_each_entry(criu_svm_md, &svms->criu_svm_metadata_list, list) {
3914 list_for_each_entry_safe(criu_svm_md, next, &svms->criu_svm_metadata_list, list) {
3933 struct svm_range_list *svms = &p->svms;
3968 list_add_tail(&criu_svm_md->list, &svms->criu_svm_metadata_list);
3984 struct svm_range_list *svms;
3990 svms = &p->svms;
3991 if (!svms)
3994 mutex_lock(&svms->lock);
3995 list_for_each_entry(prange, &svms->list, list) {
4001 mutex_unlock(&svms->lock);
4045 struct svm_range_list *svms;
4050 svms = &p->svms;
4051 if (!svms)
4091 list_for_each_entry(prange, &svms->list, list) {
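
The matches above repeat one pattern again and again: take svms->lock, query the interval tree keyed off svms->objects, and map the returned node back to its svm_range (and, via container_of, to the owning kfd_process). Below is a minimal sketch of that lookup, modeled on the svm_range_from_addr hits at source lines 2551/2558; the wrapper name svms_lookup_example is hypothetical, and the struct definitions are assumed to come from the driver's own headers (kfd_svm.h / kfd_priv.h), not reproduced here.

```c
#include <linux/interval_tree.h>
#include <linux/lockdep.h>
/* struct svm_range_list and struct svm_range are assumed to be visible
 * via the amdkfd headers (kfd_svm.h), as in the listed source file. */

/* Hypothetical helper illustrating the recurring lookup pattern. */
static struct svm_range *
svms_lookup_example(struct svm_range_list *svms, unsigned long addr)
{
	struct interval_tree_node *node;

	/* Per the kernel-doc fragments: "Context: The caller must hold svms->lock" */
	lockdep_assert_held(&svms->lock);

	/* Ranges are indexed by page number in the svms->objects interval tree. */
	node = interval_tree_iter_first(&svms->objects, addr, addr);
	if (!node)
		return NULL;

	/* Each range embeds its tree node as prange->it_node (see the
	 * interval_tree_insert/remove matches), so recover the range here. */
	return container_of(node, struct svm_range, it_node);
}
```

The same structure shows up in the fault-handling and attribute-set paths in the listing: flush or drain deferred work first, then take svms->lock, walk either svms->list or the interval tree, and release the lock before any blocking migration or mapping work completes the operation.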