Lines Matching defs:range

48 /* Long enough to ensure no retry fault comes after svm range is restored and
60 /* A giant svm range is split into smaller ranges based on this; it is decided using
74 const struct mmu_notifier_range *range,
85 * @prange: svm range structure to be removed
121 * svm_range_add_to_svms - add svm range to svms
122 * @prange: svm range structure to be added
124 * Add the svm range to svms interval tree and link list
187 bo_adev->kfd.pgmap.range.start;
446 * Migrate from GPU to GPU, remove range from source svm_bo->node
447 * range list, and return false to allocate svm_bo from destination
492 * svm_range_bo_release to finish removing this range from
493 * its range list and set prange->svm_bo to null. After this,
829 * svm_range_debug_dump - print all range information from svms
830 * @svms: svm range list header
832 * debug output svm range start, end, prefetch location from svms
843 pr_debug("range\tstart\tpage\tend\t\tlocation\n");
853 pr_debug("range\tstart\tpage\tend\t\tlocation\n");
972 * svm_range_split_adjust - split range and adjust
974 * @new: new range
975 * @old: the old range
976 * @start: the start address, in pages, that the old range is adjusted to
977 * @last: the last address, in pages, that the old range is adjusted to
979 * Copy the system memory dma_addr or vram ttm_res from the old range to the new
980 * range, from new_start up to new->npages pages; the remaining old range is from
997 WARN_ONCE(1, "invalid new range start or last\n");
1027 * svm_range_split - split a range into 2 ranges
1029 * @prange: the svm range to split
1030 * @start: the remaining range start address in pages
1031 * @last: the remaining range last address in pages
1032 * @new: the result new range generated
1037 * new range [last + 1, prange->last]
1041 * new range [prange->start, start - 1]
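
    The two cases at lines 1037 and 1041 reduce to inclusive-interval arithmetic: the kept
    part must share exactly one boundary with the old range, and the piece that is cut off
    becomes the new range. A minimal user-space sketch of that arithmetic, using a stand-in
    struct rather than the driver's struct svm_range (compare the "invalid new range start
    or last" warning at line 997); illustrative only:

    #include <stdio.h>

    /* Simplified stand-in for struct svm_range: an inclusive page interval. */
    struct piece {
        unsigned long start;   /* first page */
        unsigned long last;    /* last page, inclusive */
    };

    /*
     * Keep [start, last] in *old and cut the rest off into *new, mirroring the
     * head/tail cases above.  Exactly one boundary of [start, last] must match
     * a boundary of *old, so the cut-off piece is either the old head or the
     * old tail; anything else would not produce exactly two pieces.
     */
    static int piece_split(struct piece *old, unsigned long start,
                           unsigned long last, struct piece *new)
    {
        if (start < old->start || last > old->last)
            return -1;                      /* not contained in the old piece */
        if (start != old->start && last != old->last)
            return -1;                      /* would need two new pieces */
        if (start == old->start && last == old->last)
            return -1;                      /* nothing to split off */

        if (start > old->start) {           /* cut off the head */
            new->start = old->start;
            new->last = start - 1;
        } else {                            /* cut off the tail */
            new->start = last + 1;
            new->last = old->last;
        }
        old->start = start;
        old->last = last;
        return 0;
    }

    int main(void)
    {
        struct piece old = { 0x1000, 0x1fff }, new;

        if (!piece_split(&old, 0x1000, 0x17ff, &new))
            printf("old [0x%lx 0x%lx] new [0x%lx 0x%lx]\n",
                   old.start, old.last, new.start, new.last);
        return 0;
    }
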
1124 * @parent: parent range if prange is from child list
1144 /* Align the split range start and size to the granularity size, so a single
1145 * PTE will be used for the whole range; this reduces the number of PTEs
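
    The alignment at lines 1144-1145 can be pictured as snapping a split boundary to a
    power-of-two block of pages so one PTE can still cover the block. A rough sketch of
    that arithmetic, assuming the granularity is expressed as log2 of the page count; the
    helper below is illustrative, not the driver's function:

    #include <stdio.h>

    /*
     * Align a range around 'addr' to a 2^granularity page block, clamped to
     * the parent interval [min_start, max_last].
     */
    static void align_to_granularity(unsigned long addr, unsigned int granularity,
                                     unsigned long min_start, unsigned long max_last,
                                     unsigned long *start, unsigned long *last)
    {
        unsigned long size = 1UL << granularity;

        *start = addr & ~(size - 1);        /* round down to the block start */
        *last = *start + size - 1;          /* inclusive block end */

        if (*start < min_start)
            *start = min_start;
        if (*last > max_last)
            *last = max_last;
    }

    int main(void)
    {
        unsigned long start, last;

        /* granularity 9 -> 512 pages -> 2MB blocks with 4KB pages */
        align_to_granularity(0x12345, 9, 0x12000, 0x13fff, &start, &last);
        printf("aligned [0x%lx 0x%lx]\n", start, last);
        return 0;
    }
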
1380 /* Collect all pages in the same address range and memory domain
1387 pr_debug("Mapping range [0x%lx 0x%llx] on domain: %s\n",
1569 * To prevent concurrent destruction or change of range attributes, the
1572 * serialize concurrent migrations or validations of the same range, the
1580 * 1. Reserve page table (and SVM BO if range is in VRAM)
1585 * 4-c. Check that the range was not split or otherwise invalidated
1614 /* If prefetching the range to a GPU, or a GPU retry fault migrates the range to a
1615 * GPU that has the ACCESS attribute for the range, create the mapping
1693 pr_debug("failed %d to get svm range pages\n", r);
1706 pr_debug("failed %d to dma map range\n", r);
1711 pr_debug("hmm update the range, need validate again\n");
1716 pr_debug("range split by unmap in parallel, validate again\n");
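
    The debug messages at lines 1693-1716 belong to the validate-and-map path whose
    ordering rules appear at lines 1569-1585: the page snapshot is taken first, and if the
    range was invalidated or split in parallel, the whole pass is retried instead of
    mapping stale pages. A compilable sketch of that retry shape, with every driver step
    stubbed out (all names below are placeholders, not the driver's API):

    #include <stdbool.h>
    #include <stdio.h>

    /* Stubs standing in for the real HMM / DMA / GPU mapping steps. */
    static int  get_range_pages(void)         { return 0; }
    static int  dma_map_range(void)           { return 0; }
    static bool hmm_range_changed(void)       { return false; }
    static bool split_by_parallel_unmap(void) { return false; }
    static int  map_range_to_gpus(void)       { return 0; }

    /* Retry shape of the validate-and-map path described above. */
    static int validate_and_map(void)
    {
        int r;

        for (;;) {
            r = get_range_pages();
            if (r) {
                printf("failed %d to get svm range pages\n", r);
                return r;
            }

            r = dma_map_range();
            if (r) {
                printf("failed %d to dma map range\n", r);
                return r;
            }

            if (hmm_range_changed()) {
                printf("hmm update the range, need validate again\n");
                continue;               /* snapshot is stale, start over */
            }
            if (split_by_parallel_unmap()) {
                printf("range split by unmap in parallel, validate again\n");
                continue;
            }

            return map_range_to_gpus(); /* pages are still valid, map them */
        }
    }

    int main(void) { return validate_and_map(); }
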
1745 * @svms: the svm range list
1811 * If the range is migrating, wait for the migration to finish.
1852 pr_debug("reschedule to restore svm range\n");
1862 * svm_range_evict - evict svm range
1863 * @prange: svm range structure
1867 * @event: mmu notifier event when range is evicted or migrated
1920 pr_debug("evicting svms 0x%p range [0x%lx 0x%lx]\n",
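
    These lines only hint at the eviction flow, so the following is an assumption pieced
    together from the "reschedule to restore svm range" message at line 1852 rather than a
    quote of the driver: with GPU retry faults available the range can simply be unmapped
    and the next fault restores it, otherwise the queues stop and a delayed work item
    restores the range later. Helper names are placeholders:

    #include <stdbool.h>
    #include <stdio.h>

    /* Placeholders for the driver's queue and mapping machinery. */
    static bool retry_faults_enabled(void)  { return false; }  /* XNACK available? */
    static void unmap_from_gpus(void)       { puts("unmap range, let retry fault restore it"); }
    static void stop_user_queues(void)      { puts("stop queues using the range"); }
    static void schedule_restore_work(void) { puts("reschedule to restore svm range"); }

    /* Sketch (assumption) of the two eviction strategies. */
    static void evict_range(void)
    {
        if (retry_faults_enabled()) {
            unmap_from_gpus();
        } else {
            stop_user_queues();
            schedule_restore_work();
        }
    }

    int main(void) { evict_range(); return 0; }
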
2040 * svm_range_add - add svm range and handle overlap
2041 * @p: the process whose svms the range is added to
2050 * Check if the virtual address range overlaps any existing ranges,
2053 * any range split or allocation fails, the entire update fails. Therefore any
2105 /* node intersects the update range and its attributes
2122 pr_debug("change old range start\n");
2129 pr_debug("change old range last\n");
2155 /* add a final range at the end if needed */
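
    The update walk sketched by lines 2105-2155 visits existing ranges that intersect the
    new interval, trims the parts that hang over the edges (the "change old range
    start/last" messages), updates attributes on the overlap, and finally adds a tail range
    if the end of the interval is not covered. A compact user-space sketch of that walk
    over a sorted array standing in for the driver's interval tree (illustrative only):

    #include <stdio.h>

    struct piece { unsigned long start, last; };   /* inclusive page interval */

    /*
     * Walk existing pieces that overlap [start, last] in address order and
     * print the actions the update path takes: trim overhanging head/tail,
     * fill gaps with new pieces, and add a final piece at the end if needed.
     */
    static void add_range(const struct piece *old, int n,
                          unsigned long start, unsigned long last)
    {
        unsigned long next = start;

        for (int i = 0; i < n && old[i].start <= last; i++) {
            if (old[i].last < start)
                continue;                   /* ends before the update */
            if (old[i].start > next)        /* gap before this piece */
                printf("new   [0x%lx 0x%lx]\n", next, old[i].start - 1);
            if (old[i].start < next)
                printf("split [0x%lx 0x%lx]: change old range start\n",
                       old[i].start, old[i].last);
            if (old[i].last > last)
                printf("split [0x%lx 0x%lx]: change old range last\n",
                       old[i].start, old[i].last);
            printf("update attrs of overlap with [0x%lx 0x%lx]\n",
                   old[i].start, old[i].last);
            next = old[i].last + 1;
        }
        if (next <= last)                   /* final range at the end */
            printf("new   [0x%lx 0x%lx]\n", next, last);
    }

    int main(void)
    {
        struct piece old[] = { { 0x100, 0x1ff }, { 0x400, 0x5ff } };

        add_range(old, 2, 0x180, 0x4ff);
        return 0;
    }
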
2318 * 1. unmap_from_cpu may change work_item.op and add the range
2445 * before the range is freed to avoid straggler interrupts on
2480 * @range: mmu_notifier_range struct
2483 * If the event is MMU_NOTIFY_UNMAP, this is a CPU unmap of the range; otherwise, it
2486 * For unmap event, unmap range from GPUs, remove prange from svms in a delayed
2491 * If GPU retry fault is enabled, unmap the svm range from GPU, retry fault will
2499 const struct mmu_notifier_range *range,
2506 if (range->event == MMU_NOTIFY_RELEASE)
2513 start = max(start, range->start) >> PAGE_SHIFT;
2514 last = min(last, range->end - 1) >> PAGE_SHIFT;
2515 pr_debug("[0x%lx 0x%lx] range[0x%lx 0x%lx] notifier[0x%lx 0x%lx] %d\n",
2516 start, last, range->start >> PAGE_SHIFT,
2517 (range->end - 1) >> PAGE_SHIFT,
2519 mni->interval_tree.last >> PAGE_SHIFT, range->event);
2526 switch (range->event) {
2531 svm_range_evict(prange, mni->mm, start, last, range->event);
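
    The callback at lines 2499-2531 clamps the CPU invalidation interval to the registered
    notifier interval, converts to page numbers, and then dispatches on the event. A
    standalone sketch of just the clamping arithmetic, with minimal stand-in structs
    instead of struct mmu_notifier_range and the interval-tree node:

    #include <stdio.h>

    #define PAGE_SHIFT 12

    /* Stand-ins for the kernel structures used by the real callback. */
    struct fake_notifier_range { unsigned long start, end; };   /* bytes, end exclusive */
    struct fake_interval       { unsigned long start, last; };  /* bytes, last inclusive */

    /*
     * Clamp the CPU invalidation interval to the registered notifier interval
     * and convert to page numbers, as the callback quoted above does before
     * deciding between unmap and evict handling.
     */
    static void clamp_to_notifier(const struct fake_notifier_range *range,
                                  const struct fake_interval *node,
                                  unsigned long *start, unsigned long *last)
    {
        unsigned long s = node->start, l = node->last;

        if (s < range->start)
            s = range->start;
        if (l > range->end - 1)
            l = range->end - 1;

        *start = s >> PAGE_SHIFT;
        *last = l >> PAGE_SHIFT;
    }

    int main(void)
    {
        struct fake_notifier_range r = { .start = 0x200000, .end = 0x400000 };
        struct fake_interval n = { .start = 0x100000, .last = 0x4fffff };
        unsigned long start, last;

        clamp_to_notifier(&r, &n, &start, &last);
        printf("clamped range [0x%lx 0x%lx] in pages\n", start, last);
        return 0;
    }
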
2542 * svm_range_from_addr - find svm range from fault address
2543 * @svms: svm range list header
2544 * @addr: address to search range interval tree, in pages
2545 * @parent: parent range if range is on child list
2585 * @prange: svm range structure
2589 * the range mapping after GPU vm fault. Caller uses the best location to do
2594 * If the vm fault gpu idx is in the range's ACCESSIBLE bitmap, best_loc is the vm fault gpu
2595 * If the vm fault gpu idx is in the range's ACCESSIBLE_IN_PLACE bitmap, then
2596 * if the range's actual loc is the cpu, best_loc is the cpu
2597 * if the vm fault gpu is in the same xgmi hive as the range's actual loc gpu, best_loc is
2598 * the range's actual loc.
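
    Those rules can be written as a small decision function. The sketch below uses
    placeholder predicates for the ACCESSIBLE / ACCESSIBLE_IN_PLACE bitmaps and the XGMI
    hive test; the final fallback to system memory is an assumption, since the lines above
    do not show what happens when none of the cases match:

    #include <stdbool.h>
    #include <stdio.h>

    #define LOC_CPU 0UL

    /* Placeholder predicates; the real code tests per-GPU bitmaps on the range. */
    static bool gpu_accessible(unsigned long gpu)          { (void)gpu; return false; }
    static bool gpu_accessible_in_place(unsigned long gpu) { (void)gpu; return true; }
    static bool same_xgmi_hive(unsigned long a, unsigned long b) { (void)a; (void)b; return true; }

    /*
     * Decide where to restore the range after a GPU vm fault, following the
     * rules quoted above.  actual_loc is where the range currently lives
     * (LOC_CPU for system memory, otherwise a GPU id).  The final fallback
     * to system memory is an assumption, not taken from the lines above.
     */
    static unsigned long best_restore_location(unsigned long fault_gpu,
                                               unsigned long actual_loc)
    {
        if (gpu_accessible(fault_gpu))
            return fault_gpu;

        if (gpu_accessible_in_place(fault_gpu)) {
            if (actual_loc == LOC_CPU)
                return LOC_CPU;
            if (same_xgmi_hive(fault_gpu, actual_loc))
                return actual_loc;
        }

        return LOC_CPU;
    }

    int main(void)
    {
        printf("best_loc 0x%lx\n", best_restore_location(1, 2));
        return 0;
    }
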
2675 /* First range that starts after the fault address */
2679 /* Last range that ends before the fault address */
2682 /* Last range must end before addr because
2683 * there was no range after addr
2699 pr_debug("vma [0x%lx 0x%lx] range [0x%lx 0x%lx] is_heap_stack %d\n",
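
    Lines 2675-2699 describe how a new, unregistered range around a fault address is
    bounded by its neighbors and the enclosing VMA. A small sketch of that clamping, with
    the neighbor bounds passed in directly instead of coming from interval-tree lookups:

    #include <stdio.h>

    /*
     * Bound a new, unregistered range: it starts after the last existing range
     * that ends before the fault address, ends before the first existing range
     * that starts after it, and never leaves the enclosing VMA.  prev_last and
     * next_start would come from interval-tree lookups in the real code.
     */
    static void unregistered_bounds(unsigned long prev_last, unsigned long next_start,
                                    unsigned long vma_start, unsigned long vma_last,
                                    unsigned long *start, unsigned long *last)
    {
        *start = prev_last + 1 > vma_start ? prev_last + 1 : vma_start;
        *last = next_start - 1 < vma_last ? next_start - 1 : vma_last;
    }

    int main(void)
    {
        unsigned long start, last;

        unregistered_bounds(0x11ff, 0x1400, 0x1000, 0x1fff, &start, &last);
        printf("new range [0x%lx 0x%lx]\n", start, last);   /* [0x1200 0x13ff] */
        return 0;
    }
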
2783 /* Create a one-page svm range if the 2MB range would overlap */
2809 * @prange: svm range structure
2811 * GPU vm retry fault handling skips recovering the range in these cases:
2815 * 3. prange is a child range; it was split from the parent prange and is recovered later
2852 /* fault is on different page of same range
2866 * or the fault cannot be recovered because the GPU has no access to the range
2955 /* Need the write lock to create new range with MMU notifier.
2957 * tree is up to date before we add a new range
2967 pr_debug("failed to create unregistered range svms 0x%p address [0x%llx]\n",
2985 /* skip duplicate vm fault on different pages of same range */
3196 * svm_range_check_vm - check if the virtual address range is already mapped
3198 * @start: range start address, in pages
3199 * @last: range last address, in pages
3200 * @bo_s: mapping start address in pages if address range already mapped
3201 * @bo_l: mapping last address in pages if address range already mapped
3209 * Return 0 - OK, if the range is not mapped.
3237 pr_debug("range [0x%llx 0x%llx] already TTM mapped\n",
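
    A simplified picture of svm_range_check_vm's contract as documented above: walk the
    existing mappings, and on a collision report the mapped interval back through
    bo_s/bo_l. Returning -EADDRINUSE below is an assumption; the quoted lines only state
    that 0 means the range is not mapped:

    #include <stdio.h>
    #include <errno.h>

    struct mapping { unsigned long start, last; };  /* existing mapping, in pages */

    /*
     * Report whether [start, last] collides with an existing mapping.  On a
     * collision, return the mapped interval through bo_s/bo_l, as the kernel
     * doc above describes.
     */
    static int check_vm(const struct mapping *maps, int n,
                        unsigned long start, unsigned long last,
                        unsigned long *bo_s, unsigned long *bo_l)
    {
        for (int i = 0; i < n; i++) {
            if (maps[i].start <= last && maps[i].last >= start) {
                *bo_s = maps[i].start;
                *bo_l = maps[i].last;
                printf("range [0x%lx 0x%lx] already TTM mapped\n",
                       maps[i].start, maps[i].last);
                return -EADDRINUSE;     /* assumption: "already mapped" error */
            }
        }
        return 0;                       /* OK, the range is not mapped */
    }

    int main(void)
    {
        struct mapping maps[] = { { 0x2000, 0x2fff } };
        unsigned long bo_s, bo_l;

        return check_vm(maps, 1, 0x2800, 0x3000, &bo_s, &bo_l) ? 1 : 0;
    }
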
3255 * svm_range_is_valid - check if virtual address range is valid
3257 * @start: range start address, in pages
3258 * @size: range size, in pages
3260 * Valid virtual address range means it belongs to one or more VMAs
3290 * @prange: svm range structure
3293 * If the range maps to a single GPU, the best prefetch location is prefetch_loc, which
3296 * If range is ACCESS or ACCESS_IN_PLACE by mGPUs, only if mGPU connection on
3302 * If range is not ACCESS_IN_PLACE by mGPUs, the best prefetch location is
3305 * If range is ACCESS_IN_PLACE by mGPUs, only if mGPU connection on XGMI same
3373 * @prange: svm range structure
3376 * If the range prefetch_loc is a GPU and the actual loc is cpu 0, then migrate the range
3378 * If the range prefetch_loc is cpu 0 and the actual loc is a GPU, then migrate the range
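
    Lines 3376-3378 describe when a prefetch attribute triggers a migration. A tiny sketch
    of that decision, treating location 0 as system memory and anything else as a GPU id
    (placeholder function, not the driver's):

    #include <stdio.h>

    #define LOC_CPU 0UL     /* location 0 means system memory */

    /*
     * Decide whether a prefetch attribute triggers a migration, following the
     * two cases quoted above.  Returns the destination, or the current
     * location if nothing needs to move.
     */
    static unsigned long prefetch_migration_target(unsigned long prefetch_loc,
                                                   unsigned long actual_loc)
    {
        if (prefetch_loc != LOC_CPU && actual_loc == LOC_CPU)
            return prefetch_loc;    /* migrate the range to the GPU */
        if (prefetch_loc == LOC_CPU && actual_loc != LOC_CPU)
            return LOC_CPU;         /* migrate the range back to system memory */
        return actual_loc;          /* already where the prefetch wants it */
    }

    int main(void)
    {
        printf("target 0x%lx\n", prefetch_migration_target(1, LOC_CPU));
        printf("target 0x%lx\n", prefetch_migration_target(LOC_CPU, 1));
        return 0;
    }
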
3530 pr_debug("invalid range r=%d\n", r);
3537 /* Add new range and split existing ranges as needed */
3596 pr_debug("failed %d to map svm range\n", r);
3657 pr_debug("invalid range r=%d\n", r);
3694 pr_debug("range attrs not found return default values\n");
3832 * not used by the range which was checkpointed. Care
3878 pr_err("CRIU: failed to set range attributes\n");
3910 /* Handle one SVM range object at a time; also, the number of gpus is
4079 pr_err("CRIU: failed to obtain range attributes\n");