Lines Matching defs:new_addr (Linux kernel, mm/mremap.c)

139 unsigned long new_addr, bool need_rmap_locks)
178 new_pte = pte_offset_map_nolock(mm, new_pmd, new_addr, &new_ptl);
190 new_pte++, new_addr += PAGE_SIZE) {
208 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
210 set_pte_at(mm, new_addr, new_pte, pte);
237 unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
296 unsigned long old_addr, unsigned long new_addr, pmd_t *old_pmd,
305 unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
345 unsigned long old_addr, unsigned long new_addr, pud_t *old_pud,
354 unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
384 set_pud_at(mm, new_addr, new_pud, pud);
394 unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
416 unsigned long new_addr)
441 next = (new_addr + size) & mask;
442 if (extent > next - new_addr)
443 extent = next - new_addr;
452 unsigned long old_addr, unsigned long new_addr,
463 moved = move_normal_pmd(vma, old_addr, new_addr, old_entry,
467 moved = move_normal_pud(vma, old_addr, new_addr, old_entry,
472 move_huge_pmd(vma, old_addr, new_addr, old_entry,
477 move_huge_pud(vma, old_addr, new_addr, old_entry,
524 unsigned long *new_addr, struct vm_area_struct *new_vma,
532 if ((*old_addr & ~mask) != (*new_addr & ~mask))
537 !can_align_down(new_vma, *new_addr, mask, for_stack))
541 *new_addr = *new_addr & mask;
546 unsigned long new_addr, unsigned long len,
561 new_addr, len);
568 try_realign_addr(&old_addr, vma, &new_addr, new_vma, PMD_MASK,
576 for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
582 extent = get_extent(NORMAL_PUD, old_addr, old_end, new_addr);
587 new_pud = alloc_new_pud(vma->vm_mm, vma, new_addr);
592 move_pgt_entry(HPAGE_PUD, vma, old_addr, new_addr,
599 if (move_pgt_entry(NORMAL_PUD, vma, old_addr, new_addr,
604 extent = get_extent(NORMAL_PMD, old_addr, old_end, new_addr);
608 new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
615 move_pgt_entry(HPAGE_PMD, vma, old_addr, new_addr,
625 if (move_pgt_entry(NORMAL_PMD, vma, old_addr, new_addr,
634 new_vma, new_pmd, new_addr, need_rmap_locks) < 0)
652 unsigned long new_len, unsigned long new_addr,
707 new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
715 moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
729 move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
733 old_addr = new_addr;
734 new_addr = err;
781 return new_addr;
810 return new_addr;
868 unsigned long new_addr, unsigned long new_len, bool *locked,
878 if (offset_in_page(new_addr))
881 if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
885 if (addr + old_len > new_addr && new_addr + new_len > addr)
892 * (new_addr, and old_addr), because userspace will not know the
896 * Worst-scenario case is when both vma's (new_addr and old_addr) get
906 ret = do_munmap(mm, new_addr, new_len, uf_unmap_early);
937 ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
945 new_addr = ret;
947 ret = move_vma(vma, addr, old_len, new_len, new_addr, locked, flags, uf,
977 unsigned long, new_addr)
1046 if (new_addr & ~huge_page_mask(h))
1058 ret = mremap_to(addr, old_len, new_addr, new_len,
1135 new_addr = addr;
1152 new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
1156 if (IS_ERR_VALUE(new_addr)) {
1157 ret = new_addr;
1161 ret = move_vma(vma, addr, old_len, new_len, new_addr,
1169 mm_populate(new_addr + old_len, new_len - old_len);
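
The matches above all come from the kernel-side implementation of mremap(2): new_addr enters via the syscall definition (line 977), is validated in mremap_to() (the page-alignment and overlap checks at lines 878-885), and is then consumed by move_vma() and move_page_tables(), which walk and move the PUD/PMD/PTE entries. Below is a minimal userspace sketch of the request that exercises this path: a mapping moved to a caller-chosen new_addr with MREMAP_FIXED. The destination is reserved with an anonymous PROT_NONE mapping purely for illustration; ordinary code would pass MREMAP_MAYMOVE alone and let the kernel choose the address.

/* Exercises the mremap_to()/move_vma() path listed above:
 * MREMAP_FIXED hands the kernel an explicit new_addr. */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = (size_t)sysconf(_SC_PAGESIZE);

	/* Reserve destination space so our new_addr is known to be free;
	 * MREMAP_FIXED will atomically unmap whatever is mapped there. */
	void *dst = mmap(NULL, len, PROT_NONE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	void *src = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (src == MAP_FAILED || dst == MAP_FAILED)
		return 1;
	strcpy(src, "hello");

	/* The kernel validates new_addr in mremap_to(), then moves the
	 * page tables via move_vma()/move_page_tables(). */
	void *moved = mremap(src, len, len,
			     MREMAP_MAYMOVE | MREMAP_FIXED, dst);
	if (moved == MAP_FAILED) {
		perror("mremap");
		return 1;
	}
	printf("moved to %p: %s\n", moved, (char *)moved);
	munmap(moved, len);
	return 0;
}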