Lines Matching defs:prange

85  * @prange: svm range structure to be removed
92 static void svm_range_unlink(struct svm_range *prange)
94 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
95 prange, prange->start, prange->last);
97 if (prange->svm_bo) {
98 spin_lock(&prange->svm_bo->list_lock);
99 list_del(&prange->svm_bo_list);
100 spin_unlock(&prange->svm_bo->list_lock);
103 list_del(&prange->list);
104 if (prange->it_node.start != 0 && prange->it_node.last != 0)
105 interval_tree_remove(&prange->it_node, &prange->svms->objects);
109 svm_range_add_notifier_locked(struct mm_struct *mm, struct svm_range *prange)
111 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
112 prange, prange->start, prange->last);
114 mmu_interval_notifier_insert_locked(&prange->notifier, mm,
115 prange->start << PAGE_SHIFT,
116 prange->npages << PAGE_SHIFT,
122 * @prange: svm range structure to be added
128 static void svm_range_add_to_svms(struct svm_range *prange)
130 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
131 prange, prange->start, prange->last);
133 list_move_tail(&prange->list, &prange->svms->list);
134 prange->it_node.start = prange->start;
135 prange->it_node.last = prange->last;
136 interval_tree_insert(&prange->it_node, &prange->svms->objects);
139 static void svm_range_remove_notifier(struct svm_range *prange)
141 pr_debug("remove notifier svms 0x%p prange 0x%p [0x%lx 0x%lx]\n",
142 prange->svms, prange,
143 prange->notifier.interval_tree.start >> PAGE_SHIFT,
144 prange->notifier.interval_tree.last >> PAGE_SHIFT);
146 if (prange->notifier.interval_tree.start != 0 &&
147 prange->notifier.interval_tree.last != 0)
148 mmu_interval_notifier_remove(&prange->notifier);
159 svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
164 dma_addr_t *addr = prange->dma_addr[gpuidx];
170 addr = kvcalloc(prange->npages, sizeof(*addr), GFP_KERNEL);
173 prange->dma_addr[gpuidx] = addr;
183 struct amdgpu_device *bo_adev = prange->svm_bo->node->adev;
205 svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
213 p = container_of(prange->svms, struct kfd_process, svms);
225 r = svm_range_dma_map_dev(pdd->dev->adev, prange, offset, npages,
252 void svm_range_free_dma_mappings(struct svm_range *prange, bool unmap_dma)
260 p = container_of(prange->svms, struct kfd_process, svms);
263 dma_addr = prange->dma_addr[gpuidx];
274 svm_range_dma_unmap(dev, dma_addr, 0, prange->npages);
276 prange->dma_addr[gpuidx] = NULL;
280 static void svm_range_free(struct svm_range *prange, bool do_unmap)
282 uint64_t size = (prange->last - prange->start + 1) << PAGE_SHIFT;
283 struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
285 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms, prange,
286 prange->start, prange->last);
288 svm_range_vram_node_free(prange);
289 svm_range_free_dma_mappings(prange, do_unmap);
292 pr_debug("unreserve prange 0x%p size: 0x%llx\n", prange, size);
296 mutex_destroy(&prange->lock);
297 mutex_destroy(&prange->migrate_mutex);
298 kfree(prange);
317 struct svm_range *prange;
320 prange = kzalloc(sizeof(*prange), GFP_KERNEL);
321 if (!prange)
329 kfree(prange);
332 prange->npages = size;
333 prange->svms = svms;
334 prange->start = start;
335 prange->last = last;
336 INIT_LIST_HEAD(&prange->list);
337 INIT_LIST_HEAD(&prange->update_list);
338 INIT_LIST_HEAD(&prange->svm_bo_list);
339 INIT_LIST_HEAD(&prange->deferred_list);
340 INIT_LIST_HEAD(&prange->child_list);
341 atomic_set(&prange->invalid, 0);
342 prange->validate_timestamp = 0;
343 mutex_init(&prange->migrate_mutex);
344 mutex_init(&prange->lock);
347 bitmap_copy(prange->bitmap_access, svms->bitmap_supported,
350 svm_range_set_default_attributes(&prange->preferred_loc,
351 &prange->prefetch_loc,
352 &prange->granularity, &prange->flags);
356 return prange;
376 struct svm_range *prange =
382 list_del_init(&prange->svm_bo_list);
385 pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
386 prange->start, prange->last);
387 mutex_lock(&prange->lock);
388 prange->svm_bo = NULL;
389 mutex_unlock(&prange->lock);
432 svm_range_validate_svm_bo(struct kfd_node *node, struct svm_range *prange)
434 mutex_lock(&prange->lock);
435 if (!prange->svm_bo) {
436 mutex_unlock(&prange->lock);
439 if (prange->ttm_res) {
441 mutex_unlock(&prange->lock);
444 if (svm_bo_ref_unless_zero(prange->svm_bo)) {
450 if (prange->svm_bo->node != node) {
451 mutex_unlock(&prange->lock);
453 spin_lock(&prange->svm_bo->list_lock);
454 list_del_init(&prange->svm_bo_list);
455 spin_unlock(&prange->svm_bo->list_lock);
457 svm_range_bo_unref(prange->svm_bo);
460 if (READ_ONCE(prange->svm_bo->evicting)) {
466 mutex_unlock(&prange->lock);
467 svm_bo = prange->svm_bo;
469 svm_range_bo_unref(prange->svm_bo);
479 mutex_unlock(&prange->lock);
481 prange->svms, prange->start, prange->last);
483 prange->ttm_res = prange->svm_bo->bo->tbo.resource;
488 mutex_unlock(&prange->lock);
493 * its range list and set prange->svm_bo to null. After this,
496 while (!list_empty_careful(&prange->svm_bo_list) || prange->svm_bo)
518 svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
529 p = container_of(prange->svms, struct kfd_process, svms);
530 pr_debug("pasid: %x svms 0x%p [0x%lx 0x%lx]\n", p->pasid, prange->svms,
531 prange->start, prange->last);
533 if (svm_range_validate_svm_bo(node, prange))
556 bp.size = prange->npages * PAGE_SIZE;
604 prange->svm_bo = svm_bo;
605 prange->ttm_res = bo->tbo.resource;
606 prange->offset = 0;
609 list_add(&prange->svm_bo_list, &svm_bo->range_list);
619 prange->ttm_res = NULL;
624 void svm_range_vram_node_free(struct svm_range *prange)
626 /* serialize prange->svm_bo unref */
627 mutex_lock(&prange->lock);
628 /* prange->svm_bo has not been unref */
629 if (prange->ttm_res) {
630 prange->ttm_res = NULL;
631 mutex_unlock(&prange->lock);
632 svm_range_bo_unref(prange->svm_bo);
634 mutex_unlock(&prange->lock);
638 svm_range_get_node_by_id(struct svm_range *prange, uint32_t gpu_id)
643 p = container_of(prange->svms, struct kfd_process, svms);
654 svm_range_get_pdd_by_node(struct svm_range *prange, struct kfd_node *node)
658 p = container_of(prange->svms, struct kfd_process, svms);
722 svm_range_apply_attrs(struct kfd_process *p, struct svm_range *prange,
732 prange->preferred_loc = attrs[i].value;
735 prange->prefetch_loc = attrs[i].value;
746 bitmap_clear(prange->bitmap_access, gpuidx, 1);
747 bitmap_clear(prange->bitmap_aip, gpuidx, 1);
749 bitmap_set(prange->bitmap_access, gpuidx, 1);
750 bitmap_clear(prange->bitmap_aip, gpuidx, 1);
752 bitmap_clear(prange->bitmap_access, gpuidx, 1);
753 bitmap_set(prange->bitmap_aip, gpuidx, 1);
758 prange->flags |= attrs[i].value;
762 prange->flags &= ~attrs[i].value;
765 prange->granularity = min_t(uint32_t, attrs[i].value, 0x3F);
774 svm_range_is_same_attrs(struct kfd_process *p, struct svm_range *prange,
783 if (prange->preferred_loc != attrs[i].value)
797 if (test_bit(gpuidx, prange->bitmap_access) ||
798 test_bit(gpuidx, prange->bitmap_aip))
801 if (!test_bit(gpuidx, prange->bitmap_access))
804 if (!test_bit(gpuidx, prange->bitmap_aip))
809 if ((prange->flags & attrs[i].value) != attrs[i].value)
813 if ((prange->flags & attrs[i].value) != 0)
817 if (prange->granularity != attrs[i].value)
840 struct svm_range *prange;
845 list_for_each_entry(prange, &svms->list, list) {
847 prange, prange->start, prange->npages,
848 prange->start + prange->npages - 1,
849 prange->actual_loc);
856 prange = container_of(node, struct svm_range, it_node);
858 prange, prange->start, prange->npages,
859 prange->start + prange->npages - 1,
860 prange->actual_loc);
951 pr_debug("svms 0x%p new prange 0x%p start 0x%lx [0x%llx 0x%llx]\n",
1029 * @prange: the svm range to split
1035 * case 1: if start == prange->start
1036 * prange ==> prange[start, last]
1037 * new range [last + 1, prange->last]
1039 * case 2: if last == prange->last
1040 * prange ==> prange[start, last]
1041 * new range [prange->start, start - 1]
1047 svm_range_split(struct svm_range *prange, uint64_t start, uint64_t last,
1050 uint64_t old_start = prange->start;
1051 uint64_t old_last = prange->last;
1055 pr_debug("svms 0x%p [0x%llx 0x%llx] to [0x%llx 0x%llx]\n", prange->svms,
1063 svms = prange->svms;
1071 r = svm_range_split_adjust(*new, prange, start, last);
1083 svm_range_split_tail(struct svm_range *prange,
1087 int r = svm_range_split(prange, prange->start, new_last, &tail);
1095 svm_range_split_head(struct svm_range *prange,
1099 int r = svm_range_split(prange, new_start, prange->last, &head);
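The case 1 / case 2 comment above (file lines 1035-1041) fully specifies the interval arithmetic of svm_range_split(): the original range is shrunk to [start, last] and the leftover head or tail becomes a new range. A minimal userspace sketch of that same arithmetic, using a hypothetical stripped-down struct range rather than the driver's struct svm_range, might look like this:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* hypothetical stand-in for struct svm_range: an inclusive page interval */
struct range { uint64_t start, last; };

/*
 * Shrink *old to [start, last] and return the leftover piece in *rem,
 * mirroring the two documented cases:
 *   case 1: start == old->start  -> rem = [last + 1, old->last]   (tail)
 *   case 2: last  == old->last   -> rem = [old->start, start - 1] (head)
 */
static int range_split(struct range *old, uint64_t start, uint64_t last,
		       struct range *rem)
{
	if (old->start != start && old->last != last)
		return -1;	/* only head or tail splits are supported */
	if (start < old->start || last > old->last)
		return -1;	/* [start, last] must lie inside the range */

	if (old->start == start) {	/* case 1: keep head, split off tail */
		rem->start = last + 1;
		rem->last = old->last;
	} else {			/* case 2: keep tail, split off head */
		rem->start = old->start;
		rem->last = start - 1;
	}
	old->start = start;
	old->last = last;
	return 0;
}

int main(void)
{
	struct range r = { .start = 0x100, .last = 0x1ff }, rem;

	assert(range_split(&r, 0x100, 0x17f, &rem) == 0);	/* case 1 */
	printf("kept [0x%llx 0x%llx], new [0x%llx 0x%llx]\n",
	       (unsigned long long)r.start, (unsigned long long)r.last,
	       (unsigned long long)rem.start, (unsigned long long)rem.last);
	return 0;
}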
1107 svm_range_add_child(struct svm_range *prange, struct mm_struct *mm,
1110 pr_debug("add child 0x%p [0x%lx 0x%lx] to prange 0x%p child list %d\n",
1111 pchild, pchild->start, pchild->last, prange, op);
1115 list_add_tail(&pchild->child_list, &prange->child_list);
1123 * @addr: the vm fault address in pages, to split the prange
1124 * @parent: parent range if prange is from child list
1125 * @prange: prange to split
1127 * Trims @prange to be a single aligned block of prange->granularity if
1130 * Context: caller must hold mmap_read_lock and prange->lock
1138 struct svm_range *prange)
1148 size = 1UL << prange->granularity;
1153 prange->svms, prange->start, prange->last, start, last, size);
1155 if (start > prange->start) {
1156 r = svm_range_split(prange, start, prange->last, &head);
1162 if (last < prange->last) {
1163 r = svm_range_split(prange, prange->start, last, &tail);
1170 if (p->xnack_enabled && prange->work_item.op == SVM_OP_ADD_RANGE) {
1171 prange->work_item.op = SVM_OP_ADD_RANGE_AND_MAP;
1172 pr_debug("change prange 0x%p [0x%lx 0x%lx] op %d\n",
1173 prange, prange->start, prange->last,
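The svm_range_split_by_granularity() fragments above (file lines 1123-1173) trim a range to the single block of 2^granularity pages that covers the faulting address, splitting off the head and tail. The matched lines only show size = 1UL << prange->granularity and the two split calls, so the rounding below is a hedged reconstruction of that arithmetic in userspace form; the helper name is illustrative, not the kernel's:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: clamp an inclusive page range [*start, *last] to the
 * 2^granularity-page block containing the fault address addr (in pages). */
static void trim_to_granularity_block(uint64_t addr, unsigned int granularity,
				      uint64_t *start, uint64_t *last)
{
	uint64_t size = 1ULL << granularity;		/* block size in pages */
	uint64_t blk_start = addr & ~(size - 1);	/* round down to block */
	uint64_t blk_last = blk_start + size - 1;	/* inclusive block end */

	if (blk_start > *start)
		*start = blk_start;	/* driver splits off the head here */
	if (blk_last < *last)
		*last = blk_last;	/* driver splits off the tail here */
}

int main(void)
{
	uint64_t start = 0x1000, last = 0x1fff;

	/* granularity 9 => 512-page (2 MiB) blocks; fault at page 0x1234 */
	trim_to_granularity_block(0x1234, 9, &start, &last);
	printf("[0x%llx 0x%llx]\n",
	       (unsigned long long)start, (unsigned long long)last);
	return 0;
}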
1187 struct svm_range *prange, int domain)
1190 uint32_t flags = prange->flags;
1199 bo_node = prange->svm_bo->node;
1302 svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start,
1312 if (!prange->mapped_to_gpu) {
1313 pr_debug("prange 0x%p [0x%lx 0x%lx] not mapped to GPU\n",
1314 prange, prange->start, prange->last);
1318 if (prange->start == start && prange->last == last) {
1319 pr_debug("unmap svms 0x%p prange 0x%p\n", prange->svms, prange);
1320 prange->mapped_to_gpu = false;
1323 bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
1325 p = container_of(prange->svms, struct kfd_process, svms);
1358 svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange,
1371 last_start = prange->start + offset;
1373 pr_debug("svms 0x%p [0x%lx 0x%lx] readonly %d\n", prange->svms,
1388 last_start, prange->start + i, last_domain ? "GPU" : "CPU");
1390 pte_flags = svm_range_get_pte_flags(pdd->dev, prange, last_domain);
1395 prange->svms, last_start, prange->start + i,
1404 last_start, prange->start + i,
1406 (last_start - prange->start) << PAGE_SHIFT,
1410 for (j = last_start - prange->start; j <= i; j++)
1414 pr_debug("failed %d to map to gpu 0x%lx\n", r, prange->start);
1417 last_start = prange->start + i + 1;
1423 prange->start);
1435 svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset,
1446 if (prange->svm_bo && prange->ttm_res)
1447 bo_adev = prange->svm_bo->node->adev;
1449 p = container_of(prange->svms, struct kfd_process, svms);
1468 r = svm_range_map_to_gpu(pdd, prange, offset, npages, readonly,
1469 prange->dma_addr[gpuidx],
1493 struct svm_range *prange;
1573 * prange->migrate_mutex must be held.
1591 struct svm_range *prange, int32_t gpuidx,
1604 ctx->process = container_of(prange->svms, struct kfd_process, svms);
1605 ctx->prange = prange;
1612 bitmap_copy(ctx->bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);
1618 if (prange->actual_loc) {
1620 prange->actual_loc);
1623 prange->actual_loc);
1627 if (test_bit(gpuidx, prange->bitmap_access))
1632 * If prange is already mapped or with always mapped flag,
1636 if (prange->mapped_to_gpu ||
1637 prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)
1638 bitmap_copy(ctx->bitmap, prange->bitmap_access, MAX_GPU_INSTANCE);
1641 bitmap_or(ctx->bitmap, prange->bitmap_access,
1642 prange->bitmap_aip, MAX_GPU_INSTANCE);
1650 if (prange->actual_loc && !prange->ttm_res) {
1661 p = container_of(prange->svms, struct kfd_process, svms);
1671 start = prange->start << PAGE_SHIFT;
1672 end = (prange->last + 1) << PAGE_SHIFT;
1688 r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages,
1703 r = svm_range_dma_map(prange, ctx->bitmap, offset, npages,
1709 svm_range_lock(prange);
1715 if (!r && !list_empty(&prange->child_list)) {
1721 r = svm_range_map_to_gpus(prange, offset, npages, readonly,
1725 prange->mapped_to_gpu = true;
1727 svm_range_unlock(prange);
1734 prange->validate_timestamp = ktime_get_boottime();
1771 struct svm_range *prange;
1801 list_for_each_entry(prange, &svms->list, list) {
1802 invalid = atomic_read(&prange->invalid);
1806 pr_debug("restoring svms 0x%p prange 0x%p [0x%lx %lx] inv %d\n",
1807 prange->svms, prange, prange->start, prange->last,
1813 mutex_lock(&prange->migrate_mutex);
1815 r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
1819 prange->start);
1821 mutex_unlock(&prange->migrate_mutex);
1825 if (atomic_cmpxchg(&prange->invalid, invalid, 0) != invalid)
1863 * @prange: svm range structure
1878 svm_range_evict(struct svm_range *prange, struct mm_struct *mm,
1882 struct svm_range_list *svms = prange->svms;
1889 pr_debug("invalidate svms 0x%p prange [0x%lx 0x%lx] [0x%lx 0x%lx]\n",
1890 svms, prange->start, prange->last, start, last);
1893 (prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)) {
1895 bool mapped = prange->mapped_to_gpu;
1897 list_for_each_entry(pchild, &prange->child_list, child_list) {
1913 if (prange->start <= last && prange->last >= start)
1914 atomic_inc(&prange->invalid);
1921 prange->svms, prange->start, prange->last);
1941 prange->svms, start, last);
1942 list_for_each_entry(pchild, &prange->child_list, child_list) {
1950 s = max(start, prange->start);
1951 l = min(last, prange->last);
1953 svm_range_unmap_from_gpus(prange, s, l, trigger);
2019 struct svm_range *prange;
2028 prange = svm_range_new(svms, start, l, true);
2029 if (!prange)
2031 list_add(&prange->list, insert_list);
2032 list_add(&prange->update_list, update_list);
2077 struct svm_range *prange;
2097 prange = container_of(node, struct svm_range, it_node);
2101 if (svm_range_is_same_attrs(p, prange, nattr, attrs) &&
2102 prange->mapped_to_gpu) {
2109 struct svm_range *old = prange;
2111 prange = svm_range_clone(old);
2112 if (!prange) {
2118 list_add(&prange->list, insert_list);
2119 list_add(&prange->update_list, update_list);
2123 r = svm_range_split_head(prange, start,
2130 r = svm_range_split_tail(prange, last,
2139 list_add(&prange->update_list, update_list);
2163 list_for_each_entry_safe(prange, tmp, insert_list, list)
2164 svm_range_free(prange, false);
2165 list_for_each_entry_safe(prange, tmp, &new_list, list)
2166 svm_range_free(prange, true);
2176 struct svm_range *prange)
2181 start = prange->notifier.interval_tree.start >> PAGE_SHIFT;
2182 last = prange->notifier.interval_tree.last >> PAGE_SHIFT;
2184 if (prange->start == start && prange->last == last)
2187 pr_debug("up notifier 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n",
2188 prange->svms, prange, start, last, prange->start,
2189 prange->last);
2192 interval_tree_remove(&prange->it_node, &prange->svms->objects);
2193 svm_range_remove_notifier(prange);
2195 prange->it_node.start = prange->start;
2196 prange->it_node.last = prange->last;
2198 interval_tree_insert(&prange->it_node, &prange->svms->objects);
2199 svm_range_add_notifier_locked(mm, prange);
2203 svm_range_handle_list_op(struct svm_range_list *svms, struct svm_range *prange,
2206 switch (prange->work_item.op) {
2208 pr_debug("NULL OP 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2209 svms, prange, prange->start, prange->last);
2212 pr_debug("remove 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2213 svms, prange, prange->start, prange->last);
2214 svm_range_unlink(prange);
2215 svm_range_remove_notifier(prange);
2216 svm_range_free(prange, true);
2219 pr_debug("update notifier 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2220 svms, prange, prange->start, prange->last);
2221 svm_range_update_notifier_and_interval_tree(mm, prange);
2224 pr_debug("update and map 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2225 svms, prange, prange->start, prange->last);
2226 svm_range_update_notifier_and_interval_tree(mm, prange);
2230 pr_debug("add 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms, prange,
2231 prange->start, prange->last);
2232 svm_range_add_to_svms(prange);
2233 svm_range_add_notifier_locked(mm, prange);
2236 pr_debug("add and map 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms,
2237 prange, prange->start, prange->last);
2238 svm_range_add_to_svms(prange);
2239 svm_range_add_notifier_locked(mm, prange);
2243 WARN_ONCE(1, "Unknown prange 0x%p work op %d\n", prange,
2244 prange->work_item.op);
2288 struct svm_range *prange;
2296 prange = list_first_entry(&svms->deferred_range_list,
2300 pr_debug("prange 0x%p [0x%lx 0x%lx] op %d\n", prange,
2301 prange->start, prange->last, prange->work_item.op);
2303 mm = prange->work_item.mm;
2325 list_del_init(&prange->deferred_list);
2329 mutex_lock(&prange->migrate_mutex);
2330 while (!list_empty(&prange->child_list)) {
2333 pchild = list_first_entry(&prange->child_list,
2335 pr_debug("child prange 0x%p op %d\n", pchild,
2340 mutex_unlock(&prange->migrate_mutex);
2342 svm_range_handle_list_op(svms, prange, mm);
2358 svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange,
2362 /* if prange is on the deferred list */
2363 if (!list_empty(&prange->deferred_list)) {
2364 pr_debug("update exist prange 0x%p work op %d\n", prange, op);
2365 WARN_ONCE(prange->work_item.mm != mm, "unmatch mm\n");
2367 prange->work_item.op != SVM_OP_UNMAP_RANGE)
2368 prange->work_item.op = op;
2370 prange->work_item.op = op;
2374 prange->work_item.mm = mm;
2375 list_add_tail(&prange->deferred_list,
2376 &prange->svms->deferred_range_list);
2377 pr_debug("add prange 0x%p [0x%lx 0x%lx] to work list op %d\n",
2378 prange, prange->start, prange->last, op);
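The svm_range_add_list_work() fragments above (file lines 2358-2378) suggest one rule worth calling out: if the range is already on the deferred list with SVM_OP_UNMAP_RANGE pending, that op is kept and the new op is dropped; otherwise the pending op is simply overwritten. The full condition at file line 2366 is not shown in the matched lines, so this is a hedged userspace sketch of the apparent rule, with illustrative enum names:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative op codes standing in for the SVM_OP_* values in the listing. */
enum list_op { OP_NULL, OP_UNMAP_RANGE, OP_UPDATE_RANGE_NOTIFIER, OP_ADD_RANGE };

/*
 * Compute the pending op for a range that may already be queued:
 * an already-pending UNMAP_RANGE is "sticky" and is never replaced,
 * matching the check visible at file line 2367.
 */
static enum list_op update_pending_op(bool already_queued,
				      enum list_op pending, enum list_op new_op)
{
	if (already_queued && pending == OP_UNMAP_RANGE)
		return pending;		/* keep the unmap, drop the new op */
	return new_op;
}

int main(void)
{
	printf("%d\n", update_pending_op(true, OP_UNMAP_RANGE, OP_ADD_RANGE));	/* 1 */
	printf("%d\n", update_pending_op(true, OP_ADD_RANGE, OP_UNMAP_RANGE));	/* 1 */
	return 0;
}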
2393 struct svm_range *prange, unsigned long start,
2399 if (prange->work_item.op == SVM_OP_UNMAP_RANGE) {
2400 pr_debug("prange 0x%p [0x%lx 0x%lx] is already freed\n", prange,
2401 prange->start, prange->last);
2404 if (start > prange->last || last < prange->start)
2407 head = tail = prange;
2408 if (start > prange->start)
2409 svm_range_split(prange, prange->start, start - 1, &tail);
2413 if (head != prange && tail != prange) {
2416 } else if (tail != prange) {
2418 } else if (head != prange) {
2420 } else if (parent != prange) {
2421 prange->work_item.op = SVM_OP_UNMAP_RANGE;
2426 svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange,
2441 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n", svms,
2442 prange, prange->start, prange->last, start, last);
2450 unmap_parent = start <= prange->start && last >= prange->last;
2452 list_for_each_entry(pchild, &prange->child_list, child_list) {
2458 svm_range_unmap_split(mm, prange, pchild, start, last);
2461 s = max(start, prange->start);
2462 l = min(last, prange->last);
2464 svm_range_unmap_from_gpus(prange, s, l, trigger);
2465 svm_range_unmap_split(mm, prange, prange, start, last);
2468 svm_range_add_list_work(svms, prange, mm, SVM_OP_UNMAP_RANGE);
2470 svm_range_add_list_work(svms, prange, mm,
2486 * For unmap event, unmap range from GPUs, remove prange from svms in a delayed
2487 * work thread, and split prange if only part of prange is unmapped.
2495 * for invalidate event, prange lock is held if this is from migration
2502 struct svm_range *prange;
2521 prange = container_of(mni, struct svm_range, notifier);
2523 svm_range_lock(prange);
2528 svm_range_unmap_from_cpu(mni->mm, prange, start, last);
2531 svm_range_evict(prange, mni->mm, start, last, range->event);
2535 svm_range_unlock(prange);
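The comment fragments at file lines 2486-2495 describe the two outcomes of the CPU-side invalidation callback: an MMU unmap event unmaps the range from GPUs and hands it to the deferred-work path to be split and removed, while other invalidation events evict the range. The sketch below is a hedged userspace illustration of just that dispatch plus the clamp of the notified window to the range (the s = max()/l = min() pattern at file lines 1950-1951 and 2461-2462); the struct and the two stub handlers are illustrative stand-ins, not the driver's svm_range_unmap_from_cpu() and svm_range_evict():

#include <stdio.h>

/* Illustrative stand-in for the inclusive page interval of one prange. */
struct range { unsigned long start, last; };

/* stub for the unmap path: unmap from GPUs, then queue SVM_OP_UNMAP_RANGE
 * deferred work to split and remove the range */
static void unmap_from_cpu(unsigned long s, unsigned long l)
{
	printf("unmap [0x%lx 0x%lx], queue deferred removal\n", s, l);
}

/* stub for the evict path: mark the range invalid and schedule restore work,
 * or unmap it from GPUs when XNACK is enabled */
static void evict(unsigned long s, unsigned long l)
{
	printf("evict [0x%lx 0x%lx]\n", s, l);
}

/* Dispatch one CPU-side invalidation, clamping the notified window
 * [start, last] to the range before handing it to either path. */
static void cpu_invalidate(const struct range *r, int is_unmap,
			   unsigned long start, unsigned long last)
{
	unsigned long s = start > r->start ? start : r->start;
	unsigned long l = last < r->last ? last : r->last;

	if (is_unmap)
		unmap_from_cpu(s, l);
	else
		evict(s, l);
}

int main(void)
{
	struct range r = { .start = 0x1000, .last = 0x2fff };

	cpu_invalidate(&r, 1, 0x0, 0x17ff);	/* partial unmap */
	cpu_invalidate(&r, 0, 0x2000, 0xffff);	/* invalidation, e.g. migration */
	return 0;
}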
2556 struct svm_range *prange;
2563 prange = container_of(node, struct svm_range, it_node);
2564 pr_debug("address 0x%lx prange [0x%lx 0x%lx] node [0x%lx 0x%lx]\n",
2565 addr, prange->start, prange->last, node->start, node->last);
2567 if (addr >= prange->start && addr <= prange->last) {
2569 *parent = prange;
2570 return prange;
2572 list_for_each_entry(pchild, &prange->child_list, child_list)
2577 *parent = prange;
2585 * @prange: svm range structure
2606 svm_range_best_restore_location(struct svm_range *prange,
2615 p = container_of(prange->svms, struct kfd_process, svms);
2626 if (prange->preferred_loc == gpuid ||
2627 prange->preferred_loc == KFD_IOCTL_SVM_LOCATION_SYSMEM) {
2628 return prange->preferred_loc;
2629 } else if (prange->preferred_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED) {
2630 preferred_node = svm_range_get_node_by_id(prange, prange->preferred_loc);
2632 return prange->preferred_loc;
2636 if (test_bit(*gpuidx, prange->bitmap_access))
2639 if (test_bit(*gpuidx, prange->bitmap_aip)) {
2640 if (!prange->actual_loc)
2643 bo_node = svm_range_get_node_by_id(prange, prange->actual_loc);
2645 return prange->actual_loc;
2763 struct svm_range *prange = NULL;
2788 prange = svm_range_new(&p->svms, start, last, true);
2789 if (!prange) {
2790 pr_debug("Failed to create prange in address [0x%llx]\n", addr);
2795 svm_range_free(prange, true);
2800 prange->preferred_loc = KFD_IOCTL_SVM_LOCATION_SYSMEM;
2802 svm_range_add_to_svms(prange);
2803 svm_range_add_notifier_locked(mm, prange);
2805 return prange;
2808 /* svm_range_skip_recover - decide if prange can be recovered
2809 * @prange: svm range structure
2812 * 1. prange is on deferred list to be removed after unmap, it is stale fault,
2813 * deferred list work will drain the stale fault before free the prange.
2814 * 2. prange is on deferred list to add interval notifier after split, or
2815 * 3. prange is child range, it is split from parent prange, recover later
2820 static bool svm_range_skip_recover(struct svm_range *prange)
2822 struct svm_range_list *svms = prange->svms;
2825 if (list_empty(&prange->deferred_list) &&
2826 list_empty(&prange->child_list)) {
2832 if (prange->work_item.op == SVM_OP_UNMAP_RANGE) {
2833 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] unmapped\n",
2834 svms, prange, prange->start, prange->last);
2837 if (prange->work_item.op == SVM_OP_ADD_RANGE_AND_MAP ||
2838 prange->work_item.op == SVM_OP_ADD_RANGE) {
2839 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] not added yet\n",
2840 svms, prange, prange->start, prange->last);
2893 struct svm_range *prange;
2950 prange = svm_range_from_addr(svms, addr, NULL);
2951 if (!prange) {
2952 pr_debug("failed to find prange svms 0x%p address [0x%llx]\n",
2965 prange = svm_range_create_unregistered_range(node, p, mm, addr);
2966 if (!prange) {
2977 mutex_lock(&prange->migrate_mutex);
2979 if (svm_range_skip_recover(prange)) {
2986 if (ktime_before(timestamp, ktime_add_ns(prange->validate_timestamp,
2989 svms, prange->start, prange->last);
3011 best_loc = svm_range_best_restore_location(prange, node, &gpuidx);
3014 svms, prange->start, prange->last);
3020 svms, prange->start, prange->last, best_loc,
3021 prange->actual_loc);
3026 if (prange->actual_loc != best_loc) {
3029 r = svm_migrate_to_vram(prange, best_loc, mm,
3037 if (prange->actual_loc)
3038 r = svm_migrate_vram_to_ram(prange, mm,
3045 r = svm_migrate_vram_to_ram(prange, mm,
3051 r, svms, prange->start, prange->last);
3056 r = svm_range_validate_and_map(mm, prange, gpuidx, false, false, false);
3059 r, svms, prange->start, prange->last);
3065 mutex_unlock(&prange->migrate_mutex);
3087 struct svm_range *prange, *pchild;
3096 list_for_each_entry(prange, &p->svms.list, list) {
3097 svm_range_lock(prange);
3098 list_for_each_entry(pchild, &prange->child_list, child_list) {
3112 size = (prange->last - prange->start + 1) << PAGE_SHIFT;
3124 svm_range_unlock(prange);
3144 struct svm_range *prange;
3161 list_for_each_entry_safe(prange, next, &p->svms.list, list) {
3162 svm_range_unlink(prange);
3163 svm_range_remove_notifier(prange);
3164 svm_range_free(prange, true);
3290 * @prange: svm range structure
3315 svm_range_best_prefetch_location(struct svm_range *prange)
3318 uint32_t best_loc = prange->prefetch_loc;
3324 p = container_of(prange->svms, struct kfd_process, svms);
3329 bo_node = svm_range_get_node_by_id(prange, best_loc);
3342 bitmap_copy(bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);
3344 bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
3365 p->xnack_enabled, &p->svms, prange->start, prange->last,
3373 * @prange: svm range structure
3386 * a. svm_range_validate_vram takes prange->migrate_mutex
3396 svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
3403 best_loc = svm_range_best_prefetch_location(prange);
3406 best_loc == prange->actual_loc)
3410 r = svm_migrate_vram_to_ram(prange, mm,
3416 r = svm_migrate_to_vram(prange, best_loc, mm, KFD_MIGRATE_TRIGGER_PREFETCH);
3457 struct svm_range *prange =
3462 list_del_init(&prange->svm_bo_list);
3465 pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
3466 prange->start, prange->last);
3468 mutex_lock(&prange->migrate_mutex);
3470 r = svm_migrate_vram_to_ram(prange, mm,
3472 } while (!r && prange->actual_loc && --retries);
3474 if (!r && prange->actual_loc)
3477 if (!prange->actual_loc) {
3478 mutex_lock(&prange->lock);
3479 prange->svm_bo = NULL;
3480 mutex_unlock(&prange->lock);
3482 mutex_unlock(&prange->migrate_mutex);
3509 struct svm_range *prange;
3546 list_for_each_entry_safe(prange, next, &insert_list, list) {
3547 svm_range_add_to_svms(prange);
3548 svm_range_add_notifier_locked(mm, prange);
3550 list_for_each_entry(prange, &update_list, update_list) {
3551 svm_range_apply_attrs(p, prange, nattr, attrs, &update_mapping);
3554 list_for_each_entry_safe(prange, next, &remove_list, update_list) {
3555 pr_debug("unlink old 0x%p prange 0x%p [0x%lx 0x%lx]\n",
3556 prange->svms, prange, prange->start,
3557 prange->last);
3558 svm_range_unlink(prange);
3559 svm_range_remove_notifier(prange);
3560 svm_range_free(prange, false);
3569 list_for_each_entry(prange, &update_list, update_list) {
3572 mutex_lock(&prange->migrate_mutex);
3574 r = svm_range_trigger_migration(mm, prange, &migrated);
3579 (prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)) &&
3580 prange->mapped_to_gpu) {
3582 mutex_unlock(&prange->migrate_mutex);
3587 mutex_unlock(&prange->migrate_mutex);
3591 flush_tlb = !migrated && update_mapping && prange->mapped_to_gpu;
3593 r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
3599 mutex_unlock(&prange->migrate_mutex);
3633 struct svm_range *prange;
3712 prange = container_of(node, struct svm_range, it_node);
3716 if (prange->preferred_loc ==
3719 location != prange->preferred_loc)) {
3723 location = prange->preferred_loc;
3727 if (prange->prefetch_loc ==
3730 prefetch_loc != prange->prefetch_loc)) {
3734 prefetch_loc = prange->prefetch_loc;
3739 prange->bitmap_access, MAX_GPU_INSTANCE);
3741 prange->bitmap_aip, MAX_GPU_INSTANCE);
3744 flags_and &= prange->flags;
3745 flags_or |= prange->flags;
3748 if (get_granularity && prange->granularity < granularity)
3749 granularity = prange->granularity;
3957 struct svm_range *prange;
3967 list_for_each_entry(prange, &svms->list, list) {
3968 pr_debug("prange: 0x%p start: 0x%lx\t npages: 0x%llx\t end: 0x%llx\n",
3969 prange, prange->start, prange->npages,
3970 prange->start + prange->npages - 1);
4019 struct svm_range *prange;
4063 list_for_each_entry(prange, &svms->list, list) {
4066 svm_priv->start_addr = prange->start;
4067 svm_priv->size = prange->npages;
4069 pr_debug("CRIU: prange: 0x%p start: 0x%lx\t npages: 0x%llx end: 0x%llx\t size: 0x%llx\n",
4070 prange, prange->start, prange->npages,
4071 prange->start + prange->npages - 1,
4072 prange->npages * PAGE_SIZE);