Lines matching refs: vma (mm/mremap.c)

72 static pud_t *alloc_new_pud(struct mm_struct *mm, struct vm_area_struct *vma,
86 static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
92 pud = alloc_new_pud(mm, vma, addr);
105 static void take_rmap_locks(struct vm_area_struct *vma)
107 if (vma->vm_file)
108 i_mmap_lock_write(vma->vm_file->f_mapping);
109 if (vma->anon_vma)
110 anon_vma_lock_write(vma->anon_vma);
113 static void drop_rmap_locks(struct vm_area_struct *vma)
115 if (vma->anon_vma)
116 anon_vma_unlock_write(vma->anon_vma);
117 if (vma->vm_file)
118 i_mmap_unlock_write(vma->vm_file->f_mapping);
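The matcher only prints lines that mention vma, so the two helpers above appear without their braces. A minimal reconstruction from the matched lines (braces and the ordering comments are added here for context, they are not part of the match output): the file rmap lock is taken before the anon_vma lock and released after it.

static void take_rmap_locks(struct vm_area_struct *vma)
{
	/* file-backed: block rmap walkers via the i_mmap tree lock */
	if (vma->vm_file)
		i_mmap_lock_write(vma->vm_file->f_mapping);
	/* anonymous: block rmap walkers via the anon_vma lock */
	if (vma->anon_vma)
		anon_vma_lock_write(vma->anon_vma);
}

static void drop_rmap_locks(struct vm_area_struct *vma)
{
	/* release in the reverse order of take_rmap_locks() */
	if (vma->anon_vma)
		anon_vma_unlock_write(vma->anon_vma);
	if (vma->vm_file)
		i_mmap_unlock_write(vma->vm_file->f_mapping);
}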
136 static int move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
141 struct mm_struct *mm = vma->vm_mm;
157 * - During exec() shift_arg_pages(), we use a specially tagged vma
160 * - During mremap(), new_vma is often known to be placed after vma
167 take_rmap_locks(vma);
186 flush_tlb_batched_pending(vma->vm_mm);
215 flush_tlb_range(vma, old_end - len, old_end);
222 drop_rmap_locks(vma);
236 static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
240 struct mm_struct *mm = vma->vm_mm;
275 old_ptl = pmd_lock(vma->vm_mm, old_pmd);
287 flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
295 static inline bool move_normal_pmd(struct vm_area_struct *vma,
304 static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr,
308 struct mm_struct *mm = vma->vm_mm;
324 old_ptl = pud_lock(vma->vm_mm, old_pud);
336 flush_tlb_range(vma, old_addr, old_addr + PUD_SIZE);
344 static inline bool move_normal_pud(struct vm_area_struct *vma,
353 static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr,
357 struct mm_struct *mm = vma->vm_mm;
371 old_ptl = pud_lock(vma->vm_mm, old_pud);
385 flush_pud_tlb_range(vma, old_addr, old_addr + HPAGE_PUD_SIZE);
393 static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr,
451 static bool move_pgt_entry(enum pgt_entry entry, struct vm_area_struct *vma,
459 take_rmap_locks(vma);
463 moved = move_normal_pmd(vma, old_addr, new_addr, old_entry,
467 moved = move_normal_pud(vma, old_addr, new_addr, old_entry,
472 move_huge_pmd(vma, old_addr, new_addr, old_entry,
477 move_huge_pud(vma, old_addr, new_addr, old_entry,
487 drop_rmap_locks(vma);
498 static bool can_align_down(struct vm_area_struct *vma, unsigned long addr_to_align,
508 if (!for_stack && vma->vm_start != addr_to_align)
512 if (for_stack && addr_masked >= vma->vm_start)
519 return find_vma_intersection(vma->vm_mm, addr_masked, vma->vm_start) == NULL;
544 unsigned long move_page_tables(struct vm_area_struct *vma,
559 if (is_vm_hugetlb_page(vma))
560 return move_hugetlb_page_tables(vma, new_vma, old_addr,
568 try_realign_addr(&old_addr, vma, &new_addr, new_vma, PMD_MASK,
571 flush_cache_range(vma, old_addr, old_end);
572 mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma->vm_mm,
584 old_pud = get_old_pud(vma->vm_mm, old_addr);
587 new_pud = alloc_new_pud(vma->vm_mm, vma, new_addr);
592 move_pgt_entry(HPAGE_PUD, vma, old_addr, new_addr,
599 if (move_pgt_entry(NORMAL_PUD, vma, old_addr, new_addr,
605 old_pmd = get_old_pmd(vma->vm_mm, old_addr);
608 new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
615 move_pgt_entry(HPAGE_PMD, vma, old_addr, new_addr,
618 split_huge_pmd(vma, old_pmd, old_addr);
625 if (move_pgt_entry(NORMAL_PMD, vma, old_addr, new_addr,
633 if (move_ptes(vma, old_pmd, old_addr, old_addr + extent,
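move_page_tables() advances through the old range in chunks: each step is clamped to the next boundary of the level being moved on both the source and the destination side, and to the bytes remaining. A small user-space toy model of that chunking for the PMD case, with hypothetical addresses and PMD_SIZE assumed to be 2 MiB (x86-64 with 4 KiB pages); it only mimics the arithmetic, not the page-table work itself.

#include <stdio.h>

#define PMD_SIZE (2UL << 20)		/* assumption: 2 MiB PMDs */
#define PMD_MASK (~(PMD_SIZE - 1))

/* distance to the next PMD boundary, clamped to what is left and to the
 * destination's own distance to its next PMD boundary */
static unsigned long pmd_extent(unsigned long old_addr, unsigned long old_end,
				unsigned long new_addr)
{
	unsigned long extent = ((old_addr + PMD_SIZE) & PMD_MASK) - old_addr;

	if (extent > old_end - old_addr)
		extent = old_end - old_addr;
	if (extent > ((new_addr + PMD_SIZE) & PMD_MASK) - new_addr)
		extent = ((new_addr + PMD_SIZE) & PMD_MASK) - new_addr;
	return extent;
}

int main(void)
{
	unsigned long old_addr = 0x7f0000001000UL;	/* not PMD aligned */
	unsigned long new_addr = 0x7f1000000000UL;	/* PMD aligned */
	unsigned long old_end  = old_addr + (5UL << 20);

	while (old_addr < old_end) {
		unsigned long extent = pmd_extent(old_addr, old_end, new_addr);

		printf("copy %#lx -> %#lx, %lu KiB\n",
		       old_addr, new_addr, extent >> 10);
		old_addr += extent;
		new_addr += extent;
	}
	return 0;
}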
650 static unsigned long move_vma(struct vm_area_struct *vma,
657 struct mm_struct *mm = vma->vm_mm;
659 unsigned long vm_flags = vma->vm_flags;
671 * which may split one vma into three before unmapping.
679 if (vma->vm_ops && vma->vm_ops->may_split) {
680 if (vma->vm_start != old_addr)
681 err = vma->vm_ops->may_split(vma, old_addr);
682 if (!err && vma->vm_end != old_addr + old_len)
683 err = vma->vm_ops->may_split(vma, old_addr + old_len);
692 * pages recently unmapped. But leave vma->vm_flags as it was,
693 * so KSM can come around to merge on vma and new_vma afterwards.
695 err = ksm_madvise(vma, old_addr, old_addr + old_len,
705 vma_start_write(vma);
706 new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
707 new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
715 moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
719 } else if (vma->vm_ops && vma->vm_ops->mremap) {
720 err = vma->vm_ops->mremap(new_vma);
729 move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
731 vma = new_vma;
739 if (is_vm_hugetlb_page(vma)) {
740 clear_vma_resv_huge_pages(vma);
745 vm_flags_clear(vma, VM_ACCOUNT);
746 if (vma->vm_start < old_addr)
747 account_start = vma->vm_start;
748 if (vma->vm_end > old_addr + old_len)
749 account_end = vma->vm_end;
762 vm_stat_account(mm, vma->vm_flags, new_len >> PAGE_SHIFT);
764 /* Tell pfnmap has moved from this vma */
765 if (unlikely(vma->vm_flags & VM_PFNMAP))
766 untrack_pfn_clear(vma);
769 /* We always clear VM_LOCKED[ONFAULT] on the old vma */
770 vm_flags_clear(vma, VM_LOCKED_MASK);
773 * anon_vma links of the old vma is no longer needed after its page
776 if (new_vma != vma && vma->vm_start == old_addr &&
777 vma->vm_end == (old_addr + old_len))
778 unlink_anon_vmas(vma);
786 /* OOM: unable to split vma, just get accounts right */
799 /* Restore VM_ACCOUNT if one or two pieces of vma left */
801 vma = vma_prev(&vmi);
802 vm_flags_set(vma, VM_ACCOUNT);
806 vma = vma_next(&vmi);
807 vm_flags_set(vma, VM_ACCOUNT);
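move_vma() is what ultimately services a relocating mremap(2). A small user-space sketch that exercises it by growing an anonymous mapping with MREMAP_MAYMOVE, so the kernel may relocate the whole VMA; sizes are arbitrary and error handling is trimmed.

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t old_len = 4096, new_len = 1024 * 1024;
	char *old = mmap(NULL, old_len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (old == MAP_FAILED)
		return 1;
	strcpy(old, "hello");

	/* may grow in place or come back at a different address */
	char *q = mremap(old, old_len, new_len, MREMAP_MAYMOVE);
	if (q == MAP_FAILED)
		return 1;

	/* contents follow the mapping even if the address changed */
	printf("%p -> %p: %s\n", (void *)old, (void *)q, q);
	munmap(q, new_len);
	return 0;
}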
817 struct vm_area_struct *vma;
820 vma = vma_lookup(mm, addr);
821 if (!vma)
832 if (!old_len && !(vma->vm_flags & (VM_SHARED | VM_MAYSHARE))) {
838 (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)))
842 if (old_len > vma->vm_end - addr)
846 return vma;
849 pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
850 pgoff += vma->vm_pgoff;
854 if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
857 if (!mlock_future_ok(mm, vma->vm_flags, new_len - old_len))
860 if (!may_expand_vm(mm, vma->vm_flags,
864 return vma;
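vma_to_resize() requires the old range to sit inside a single VMA (the old_len > vma->vm_end - addr check at line 842). A short demo of how that looks from user space; the errno is expected to be EFAULT here, but treat that as an assumption rather than a guarantee.

#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	char *p = mmap(NULL, 2 * 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;
	munmap(p + 4096, 4096);		/* old range now ends mid-hole */

	/* old_size (8192) extends past the remaining 4096-byte VMA */
	void *q = mremap(p, 8192, 16384, MREMAP_MAYMOVE);
	if (q == MAP_FAILED)
		printf("mremap failed: %s\n", strerror(errno));
	munmap(p, 4096);
	return 0;
}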
874 struct vm_area_struct *vma;
893 * state of the vma's after it gets -ENOMEM.
896 * Worst-scenario case is when both vma's (new_addr and old_addr) get
918 vma = vma_to_resize(addr, old_len, new_len, flags);
919 if (IS_ERR(vma)) {
920 ret = PTR_ERR(vma);
926 !may_expand_vm(mm, vma->vm_flags, old_len >> PAGE_SHIFT)) {
934 if (vma->vm_flags & VM_MAYSHARE)
937 ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
938 ((addr - vma->vm_start) >> PAGE_SHIFT),
947 ret = move_vma(vma, addr, old_len, new_len, new_addr, locked, flags, uf,
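mremap_to() handles the MREMAP_FIXED path: the caller picks the destination, anything already mapped there is unmapped, and MREMAP_MAYMOVE must be set as well. A user-space sketch; reserving the destination with a PROT_NONE mapping first is only a convenient way to obtain a known-good address, not a requirement.

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4096;
	char *src = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	char *dst = mmap(NULL, len, PROT_NONE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (src == MAP_FAILED || dst == MAP_FAILED)
		return 1;
	strcpy(src, "moved");

	/* the placeholder at dst is replaced by the relocated mapping */
	void *p = mremap(src, len, len, MREMAP_MAYMOVE | MREMAP_FIXED, dst);
	if (p == MAP_FAILED)
		return 1;

	printf("now at %p: %s\n", p, (char *)p);	/* p == dst */
	munmap(p, len);
	return 0;
}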
954 static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
956 unsigned long end = vma->vm_end + delta;
958 if (end < vma->vm_end) /* overflow */
960 if (find_vma_intersection(vma->vm_mm, vma->vm_end, end))
962 if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
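vma_expandable() decides whether a mapping can grow in place: the address space directly behind the VMA must be free. Without MREMAP_MAYMOVE that is the only way mremap() can grow a mapping, which the demo below relies on (page size hard-coded to 4096 for brevity).

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	char *p = mmap(NULL, 2 * 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;
	munmap(p + 4096, 4096);			/* leave a hole after page 0 */

	/* no MREMAP_MAYMOVE: either expands in place or fails */
	void *q = mremap(p, 4096, 2 * 4096, 0);
	printf("%p -> %p (%s)\n", (void *)p, q,
	       q == p ? "expanded in place" : "unexpected");
	return 0;
}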
980 struct vm_area_struct *vma;
1031 vma = vma_lookup(mm, addr);
1032 if (!vma) {
1037 if (is_vm_hugetlb_page(vma)) {
1038 struct hstate *h __maybe_unused = hstate_vma(vma);
1090 vma = vma_to_resize(addr, old_len, new_len, flags);
1091 if (IS_ERR(vma)) {
1092 ret = PTR_ERR(vma);
1098 if (old_len == vma->vm_end - addr) {
1102 if (vma_expandable(vma, delta)) {
1104 VMA_ITERATOR(vmi, mm, vma->vm_end);
1107 if (vma->vm_flags & VM_ACCOUNT) {
1117 * extension we are adding to the already existing vma,
1119 * already existing vma (expand operation itself) and
1120 * possibly also with the next vma if it becomes
1121 * adjacent to the expanded vma and otherwise
1124 vma = vma_merge_extend(&vmi, vma, delta);
1125 if (!vma) {
1131 vm_stat_account(mm, vma->vm_flags, pages);
1132 if (vma->vm_flags & VM_LOCKED) {
1149 if (vma->vm_flags & VM_MAYSHARE)
1152 new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
1153 vma->vm_pgoff +
1154 ((addr - vma->vm_start) >> PAGE_SHIFT),
1161 ret = move_vma(vma, addr, old_len, new_len, new_addr,