Lines Matching defs:mm

109 svm_range_add_notifier_locked(struct mm_struct *mm, struct svm_range *prange)
114 mmu_interval_notifier_insert_locked(&prange->notifier, mm,
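The call at line 114 registers prange->notifier for CPU invalidation callbacks on one virtual range. A minimal sketch of the same registration, with all demo_* names as hypothetical stand-ins; mmu_interval_notifier_insert_locked() expects the caller to already hold mmap_lock for write, which is what the _locked suffix signals:

#include <linux/mmu_notifier.h>

static bool demo_invalidate(struct mmu_interval_notifier *mni,
			    const struct mmu_notifier_range *range,
			    unsigned long cur_seq)
{
	/* invalidate device mappings covering [range->start, range->end);
	 * real drivers take their own lock around this and may return
	 * false when !mmu_notifier_range_blockable(range) */
	mmu_interval_set_seq(mni, cur_seq);
	return true;
}

static const struct mmu_interval_notifier_ops demo_notifier_ops = {
	.invalidate = demo_invalidate,
};

/* caller holds mmap_lock for write, as svm_range_add_notifier_locked() does */
static int demo_add_notifier(struct mmu_interval_notifier *mni,
			     struct mm_struct *mm,
			     unsigned long start, unsigned long length)
{
	return mmu_interval_notifier_insert_locked(mni, mm, start, length,
						   &demo_notifier_ops);
}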
539 struct mm_struct *mm;
554 mm = get_task_mm(p->lead_thread);
555 if (!mm) {
556 pr_debug("failed to get mm\n");
563 mm,
565 mmput(mm);
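Lines 554-565 show the standard pin-and-release pattern for another task's address space. A minimal sketch, assuming only a valid task_struct; demo_use_task_mm() is a hypothetical name:

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/sched/mm.h>

static int demo_use_task_mm(struct task_struct *task)
{
	struct mm_struct *mm;

	mm = get_task_mm(task);	/* raises mm_users, or returns NULL */
	if (!mm) {
		pr_debug("failed to get mm\n");
		return -ESRCH;	/* task is exiting or is a kernel thread */
	}

	/* ... the address space cannot be torn down here ... */

	mmput(mm);		/* drop the reference from get_task_mm() */
	return 0;
}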
1145 svm_range_add_child(struct svm_range *prange, struct mm_struct *mm,
1151 pchild->work_item.mm = mm;
1574 static int svm_range_validate_and_map(struct mm_struct *mm,
1670 vma = vma_lookup(mm, addr);
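svm_range_validate_and_map() walks the range VMA by VMA (line 1670). A sketch of the vma_lookup() idiom under the mmap read lock; demo_addr_is_mapped() and the VM_READ check are illustrative, not taken from the driver:

#include <linux/mm.h>
#include <linux/mmap_lock.h>

static int demo_addr_is_mapped(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;
	int r = 0;

	mmap_read_lock(mm);
	vma = vma_lookup(mm, addr);	/* NULL when no VMA contains addr */
	if (!vma || !(vma->vm_flags & VM_READ))
		r = -EFAULT;
	mmap_read_unlock(mm);
	return r;
}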
1742 * @mm: the mm structure
1749 struct mm_struct *mm)
1753 mmap_write_lock(mm);
1757 mmap_write_unlock(mm);
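svm_range_list_lock_and_flush_work() (lines 1749-1757) returns with mmap_lock held for write only once no deferred list work is pending. A sketch of that retry shape, with the pending test and work item as hypothetical parameters:

#include <linux/mmap_lock.h>
#include <linux/workqueue.h>

/* returns with mmap_lock held for write */
static void demo_lock_and_flush(struct mm_struct *mm,
				struct work_struct *deferred_work,
				bool (*work_pending)(void))
{
	for (;;) {
		mmap_write_lock(mm);
		if (!work_pending())
			return;
		mmap_write_unlock(mm);	/* the deferred work needs mmap_lock too */
		flush_work(deferred_work);
	}
}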
1769 struct mm_struct *mm;
1784 /* Keep mm reference while svm_range_validate_and_map() maps the ranges */
1785 mm = get_task_mm(p->lead_thread);
1786 if (!mm) {
1787 pr_debug("svms 0x%p process mm gone\n", svms);
1792 svm_range_list_lock_and_flush_work(svms, mm);
1811 r = svm_range_validate_and_map(mm, prange->start, prange->last, prange,
1831 r = kgd2kfd_resume_mm(mm);
1843 mmap_write_unlock(mm);
1852 kfd_smi_event_queue_restore_rescheduled(mm);
1854 mmput(mm);
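The restore worker (lines 1769-1854) combines the two patterns above: pin the mm, enter with the write lock via the lock-and-flush helper, then release in reverse order. A skeleton with the amdgpu-specific steps reduced to comments; demo_restore_work() is a hypothetical name:

static void demo_restore_work(struct task_struct *lead_thread)
{
	struct mm_struct *mm;

	mm = get_task_mm(lead_thread);
	if (!mm)
		return;		/* process mm is already gone */

	mmap_write_lock(mm);
	/* ... revalidate and map each evicted range ... */
	/* ... resume user queues once GPU mappings are restored ... */
	mmap_write_unlock(mm);
	mmput(mm);
}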
1860 * @mm: current process mm_struct
1874 svm_range_evict(struct svm_range *prange, struct mm_struct *mm,
1920 r = kgd2kfd_quiesce_mm(mm, KFD_QUEUE_EVICTION_TRIGGER_SVM);
2174 svm_range_update_notifier_and_interval_tree(struct mm_struct *mm,
2198 svm_range_add_notifier_locked(mm, prange);
2203 struct mm_struct *mm)
2220 svm_range_update_notifier_and_interval_tree(mm, prange);
2225 svm_range_update_notifier_and_interval_tree(mm, prange);
2232 svm_range_add_notifier_locked(mm, prange);
2238 svm_range_add_notifier_locked(mm, prange);
2288 struct mm_struct *mm;
2302 mm = prange->work_item.mm;
2304 mmap_write_lock(mm);
2310 mmap_write_unlock(mm);
2337 svm_range_handle_list_op(svms, pchild, mm);
2341 svm_range_handle_list_op(svms, prange, mm);
2343 mmap_write_unlock(mm);
2346 * last mm refcount, schedule release work to avoid circular locking
2348 mmput_async(mm);
2358 struct mm_struct *mm, enum svm_work_list_ops op)
2364 WARN_ONCE(prange->work_item.mm != mm, "mismatched mm\n");
2372 mmget(mm);
2373 prange->work_item.mm = mm;
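Lines 2364-2373 and the worker at lines 2302-2348 form one hand-off: the producer pins the mm with mmget() and stashes it in the work item, and the worker drops that pin with mmput_async() so a final-release free cannot re-enter locks held on the worker path (the "circular locking" at line 2346). A sketch, with demo_item as a hypothetical container whose work field is set up elsewhere with INIT_WORK():

#include <linux/kernel.h>
#include <linux/mmap_lock.h>
#include <linux/sched/mm.h>
#include <linux/workqueue.h>

struct demo_item {
	struct work_struct work;	/* initialized with INIT_WORK() */
	struct mm_struct *mm;
};

static void demo_deferred_work(struct work_struct *work)
{
	struct demo_item *item = container_of(work, struct demo_item, work);
	struct mm_struct *mm = item->mm;

	mmap_write_lock(mm);
	/* ... handle the deferred list ops ... */
	mmap_write_unlock(mm);

	/* possibly the last reference: free asynchronously so the mm
	 * teardown cannot recurse into locks held on this path */
	mmput_async(mm);
}

static void demo_queue_deferred(struct demo_item *item, struct mm_struct *mm)
{
	mmget(mm);	/* caller must know mm is still live */
	item->mm = mm;
	schedule_work(&item->work);
}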
2391 svm_range_unmap_split(struct mm_struct *mm, struct svm_range *parent,
2413 svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE);
2414 svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE);
2416 svm_range_add_child(parent, mm, tail, SVM_OP_UNMAP_RANGE);
2418 svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE);
2425 svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange,
2435 p = kfd_lookup_process_by_mm(mm);
2457 svm_range_unmap_split(mm, prange, pchild, start, last);
2464 svm_range_unmap_split(mm, prange, prange, start, last);
2467 svm_range_add_list_work(svms, prange, mm, SVM_OP_UNMAP_RANGE);
2469 svm_range_add_list_work(svms, prange, mm,
2507 if (!mmget_not_zero(mni->mm))
2527 svm_range_unmap_from_cpu(mni->mm, prange, start, last);
2530 svm_range_evict(prange, mni->mm, start, last, range->event);
2535 mmput(mni->mm);
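The invalidate path at lines 2507-2535 guards against a concurrently exiting process: mmget_not_zero() succeeds only while mm_users is still above zero. A sketch of that guard inside an interval-notifier callback, building on the notifier sketch above:

static bool demo_invalidate_guarded(struct mmu_interval_notifier *mni,
				    const struct mmu_notifier_range *range,
				    unsigned long cur_seq)
{
	if (!mmget_not_zero(mni->mm))
		return true;	/* mm is exiting; nothing left to unmap */

	mmu_interval_set_seq(mni, cur_seq);
	/* ... unmap from the GPU or evict the overlapped ranges ... */

	mmput(mni->mm);
	return true;
}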
2662 vma = vma_lookup(p->mm, addr << PAGE_SHIFT);
2759 struct mm_struct *mm,
2802 svm_range_add_notifier_locked(mm, prange);
2891 struct mm_struct *mm = NULL;
2933 mm = get_task_mm(p->lead_thread);
2934 if (!mm) {
2935 pr_debug("svms 0x%p failed to get mm\n", svms);
2947 mmap_read_lock(mm);
2960 mmap_read_unlock(mm);
2961 mmap_write_lock(mm);
2965 prange = svm_range_create_unregistered_range(node, p, mm, addr);
2969 mmap_write_downgrade(mm);
2975 mmap_write_downgrade(mm);
2997 vma = vma_lookup(mm, addr << PAGE_SHIFT);
3035 mm, KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU);
3043 r = svm_migrate_vram_to_ram(prange, mm, start, last,
3049 r = svm_migrate_vram_to_ram(prange, mm, start, last,
3059 r = svm_range_validate_and_map(mm, start, last, prange, gpuidx, false,
3072 mmap_read_unlock(mm);
3076 mmput(mm);
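The GPU fault handler (lines 2891-3076) starts under the read lock, upgrades only when it must create a range for an unregistered address, then downgrades back to the read lock for the long validate-and-map tail. A sketch of that lock dance; demo_range_registered() is a hypothetical predicate:

static int demo_fault_path(struct mm_struct *mm, unsigned long addr,
			   bool (*range_registered)(unsigned long))
{
	mmap_read_lock(mm);
	if (!range_registered(addr)) {
		/* mmap_lock has no read-to-write promotion, so drop,
		 * retake for write, and recheck after the gap */
		mmap_read_unlock(mm);
		mmap_write_lock(mm);
		if (!range_registered(addr)) {
			/* ... create the unregistered range ... */
		}
		mmap_write_downgrade(mm);	/* now holds the read lock */
	}

	/* ... validate and map under the read lock ... */

	mmap_read_unlock(mm);
	return 0;
}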
3160 * not find kfd process and take mm lock to recover fault.
3282 vma = vma_lookup(p->mm, start);
3376 * @mm: current process mm_struct
3400 svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
3419 r = svm_migrate_vram_to_ram(prange, mm, prange->start, prange->last,
3426 mm, KFD_MIGRATE_TRIGGER_PREFETCH);
3452 struct mm_struct *mm;
3457 if (mmget_not_zero(svm_bo->eviction_fence->mm)) {
3458 mm = svm_bo->eviction_fence->mm;
3464 mmap_read_lock(mm);
3483 r = svm_migrate_vram_to_ram(prange, mm,
3501 mmap_read_unlock(mm);
3502 mmput(mm);
3514 svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
3541 svm_range_list_lock_and_flush_work(svms, mm);
3546 mmap_write_unlock(mm);
3557 mmap_write_unlock(mm);
3563 svm_range_add_notifier_locked(mm, prange);
3578 mmap_write_downgrade(mm);
3589 r = svm_range_trigger_migration(mm, prange, &migrated);
3608 r = svm_range_validate_and_map(mm, prange->start, prange->last, prange,
3623 r = svm_range_validate_and_map(mm, prange->start, prange->last, prange,
3635 mmap_read_unlock(mm);
3646 svm_range_get_attr(struct kfd_process *p, struct mm_struct *mm,
3681 mmap_read_lock(mm);
3683 mmap_read_unlock(mm);
3832 struct mm_struct *mm;
3839 mm = get_task_mm(p->lead_thread);
3840 if (!mm) {
3841 pr_err("failed to get mm for the target process\n");
3902 ret = svm_range_set_attr(p, mm, criu_svm_md->data.start_addr,
3920 mmput(mm);
4048 struct mm_struct *mm;
4054 mm = get_task_mm(p->lead_thread);
4055 if (!mm) {
4056 pr_err("failed to get mm for the target process\n");
4102 ret = svm_range_get_attr(p, mm, svm_priv->start_addr,
4128 mmput(mm);
4136 struct mm_struct *mm = current->mm;
4144 r = svm_range_set_attr(p, mm, start, size, nattrs, attrs);
4147 r = svm_range_get_attr(p, mm, start, size, nattrs, attrs);