Cross-reference listing for /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/mm/

Lines matching refs:address

357  * At what user virtual address is page expected in @vma?
358 * Returns virtual address or -EFAULT if page's index/offset is not
365 unsigned long address;
369 address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
370 if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
374 return address;
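The fragment at lines 365-374 is the body of vma_address(), which translates a page's file offset into the user virtual address at which the page would appear inside a given VMA; these listings appear to come from the reverse-mapping code (mm/rmap.c) of the Linux 2.6.36 tree bundled with this firmware. Below is a minimal userspace sketch of that arithmetic, assuming 4 KiB pages; the struct is a simplified stand-in for the kernel's vm_area_struct, not the real definition.

/* Minimal model of the vma_address() arithmetic at lines 369-374. */
#include <stdio.h>

#define PAGE_SHIFT 12
#define EFAULT     14

struct vma_model {
    unsigned long vm_start;   /* first virtual address covered by the VMA */
    unsigned long vm_end;     /* one past the last covered virtual address */
    unsigned long vm_pgoff;   /* file offset of vm_start, in pages */
};

/* At what user virtual address is a page with file offset 'pgoff' expected
 * in 'vma'?  Returns -EFAULT (converted to unsigned long, as the kernel
 * does) when the computed address falls outside the VMA. */
static unsigned long vma_address_model(unsigned long pgoff,
                                       const struct vma_model *vma)
{
    unsigned long address;

    address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
    if (address < vma->vm_start || address >= vma->vm_end)
        return -EFAULT;
    return address;
}

int main(void)
{
    struct vma_model vma = { 0x400000, 0x500000, 16 };

    /* pgoff 20 is 4 pages past vm_pgoff, so 4 * 4 KiB past vm_start. */
    printf("%#lx\n", vma_address_model(20, &vma));        /* 0x404000 */
    /* pgoff 8 lies before the VMA's window and yields -EFAULT. */
    printf("%ld\n", (long)vma_address_model(8, &vma));    /* -14 */
    return 0;
}

The -EFAULT return is what every caller in this listing (lines 470-471, 553-554, 621-622, 727-728, 1268-1269, and so on) uses to decide that the page cannot be mapped in that VMA and the VMA should be skipped.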
378 * At what user virtual address is page expected in vma?
402 * Check that @page is mapped at @address into @mm.
411 unsigned long address, spinlock_t **ptlp, int sync)
420 pte = huge_pte_offset(mm, address);
425 pgd = pgd_offset(mm, address);
429 pud = pud_offset(pgd, address);
433 pmd = pmd_offset(pud, address);
437 pte = pte_offset_map(pmd, address);
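Lines 420-437 show page_check_address() descending the page tables: huge pages are resolved with huge_pte_offset(), everything else walks pgd -> pud -> pmd -> pte. The sketch below only illustrates how the per-level indices are carved out of a virtual address; the shift values are the generic x86-64 4-level layout chosen for illustration, while 2.6.36 defines the real constants per architecture in its asm/pgtable headers.

/* Illustrative derivation of the indices used by pgd_offset(), pud_offset(),
 * pmd_offset() and pte_offset_map() at lines 425-437. */
#include <stdio.h>

#define PAGE_SHIFT   12
#define PTRS_PER_PTE 512               /* 9 bits of index per level */
#define PMD_SHIFT    (PAGE_SHIFT + 9)  /* 21 */
#define PUD_SHIFT    (PMD_SHIFT + 9)   /* 30 */
#define PGDIR_SHIFT  (PUD_SHIFT + 9)   /* 39 */

static unsigned long level_index(unsigned long address, int shift)
{
    return (address >> shift) & (PTRS_PER_PTE - 1);
}

int main(void)
{
    unsigned long address = 0x00007f1234567000UL;

    printf("pgd index: %lu\n", level_index(address, PGDIR_SHIFT));
    printf("pud index: %lu\n", level_index(address, PUD_SHIFT));
    printf("pmd index: %lu\n", level_index(address, PMD_SHIFT));
    printf("pte index: %lu\n", level_index(address, PAGE_SHIFT));
    return 0;
}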
466 unsigned long address;
470 address = vma_address(page, vma);
471 if (address == -EFAULT) /* out of vma range */
473 pte = page_check_address(page, vma->vm_mm, address, &ptl, 1);
486 unsigned long address, unsigned int *mapcount,
494 pte = page_check_address(page, mm, address, &ptl, 0);
509 if (ptep_clear_flush_young_notify(vma, address, pte)) {
553 unsigned long address = vma_address(page, vma);
554 if (address == -EFAULT)
563 referenced += page_referenced_one(page, vma, address,
621 unsigned long address = vma_address(page, vma);
622 if (address == -EFAULT)
631 referenced += page_referenced_one(page, vma, address,
688 unsigned long address)
695 pte = page_check_address(page, mm, address, &ptl, 1);
702 flush_cache_page(vma, address, pte_pfn(*pte));
703 entry = ptep_clear_flush_notify(vma, address, pte);
706 set_pte_at(mm, address, pte, entry);
727 unsigned long address = vma_address(page, vma);
728 if (address == -EFAULT)
730 ret += page_mkclean_one(page, vma, address);
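page_mkclean_one() (lines 695-706) clears the PTE, reinstalls it write-protected and clean, and thereby forces the next store to the page to fault so dirtiness can be tracked again. The toy model below mirrors that sequence with a PTE modelled as a plain unsigned long; the two bit masks are hypothetical placeholders, not any architecture's real layout, and the helper calls named in the comments are the ones visible at lines 702-706.

/* Toy model of the write-protect-and-clean step in page_mkclean_one(). */
#include <stdio.h>

#define PTE_WRITE 0x2UL    /* hypothetical "writable" bit */
#define PTE_DIRTY 0x40UL   /* hypothetical "dirty" bit */

int main(void)
{
    unsigned long pte = 0x1000 | PTE_WRITE | PTE_DIRTY;  /* mapped, writable, dirty */
    unsigned long entry;

    /* Clear the entry, then re-insert it write-protected and clean. */
    entry = pte;
    pte = 0;                 /* ptep_clear_flush_notify() at line 703 */
    entry &= ~PTE_WRITE;     /* pte_wrprotect() */
    entry &= ~PTE_DIRTY;     /* pte_mkclean() */
    pte = entry;             /* set_pte_at() at line 706 */

    printf("pte after mkclean: %#lx\n", pte);
    return 0;
}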
762 * @address: the user virtual address mapped
770 struct vm_area_struct *vma, unsigned long address)
776 VM_BUG_ON(page->index != linear_page_index(vma, address));
786 * @address: the user virtual address mapped
790 struct vm_area_struct *vma, unsigned long address, int exclusive)
818 page->index = linear_page_index(vma, address);
825 * @address: the user virtual address mapped
828 struct vm_area_struct *vma, unsigned long address)
844 BUG_ON(page->index != linear_page_index(vma, address));
852 * @address: the user virtual address mapped
860 struct vm_area_struct *vma, unsigned long address)
862 do_page_add_anon_rmap(page, vma, address, 0);
871 struct vm_area_struct *vma, unsigned long address, int exclusive)
880 VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
882 __page_set_anon_rmap(page, vma, address, exclusive);
884 __page_check_anon_rmap(page, vma, address);
891 * @address: the user virtual address mapped
898 struct vm_area_struct *vma, unsigned long address)
900 VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
904 __page_set_anon_rmap(page, vma, address, 1);
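The anon-rmap helpers at lines 776, 818 and 844 check or set page->index using linear_page_index(), which is the inverse of the vma_address() computation sketched earlier. A simplified model, again with a stand-in struct and 4 KiB pages; the formula matches the generic definition in 2.6.36's pagemap.h for the non-hugetlb case.

/* Simplified model of linear_page_index() as used at lines 776, 818, 844. */
#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12

struct vma_model {
    unsigned long vm_start;
    unsigned long vm_end;
    unsigned long vm_pgoff;
};

static unsigned long linear_page_index_model(const struct vma_model *vma,
                                             unsigned long address)
{
    return ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
}

int main(void)
{
    struct vma_model vma = { 0x400000, 0x500000, 16 };
    unsigned long address = 0x404000;
    unsigned long index = linear_page_index_model(&vma, address);

    /* Mapping the index back through the vma_address() formula recovers the
     * original address; this is the invariant the BUG_ON/VM_BUG_ON checks at
     * lines 776 and 844 rely on. */
    assert(vma.vm_start + ((index - vma.vm_pgoff) << PAGE_SHIFT) == address);
    printf("page->index = %lu\n", index);
    return 0;
}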
977 unsigned long address, enum ttu_flags flags)
985 pte = page_check_address(page, mm, address, &ptl, 0);
1002 if (ptep_clear_flush_young_notify(vma, address, pte)) {
1009 flush_cache_page(vma, address, page_to_pfn(page));
1010 pteval = ptep_clear_flush_notify(vma, address, pte);
1024 set_pte_at(mm, address, pte,
1035 set_pte_at(mm, address, pte, pteval);
1056 set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
1062 set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
1105 * around the vma's virtual address space.
1134 unsigned long address;
1139 address = (vma->vm_start + cursor) & CLUSTER_MASK;
1140 end = address + CLUSTER_SIZE;
1141 if (address < vma->vm_start)
1142 address = vma->vm_start;
1146 pgd = pgd_offset(mm, address);
1150 pud = pud_offset(pgd, address);
1154 pmd = pmd_offset(pud, address);
1168 pte = pte_offset_map_lock(mm, pmd, address, &ptl);
1173 for (; address < end; pte++, address += PAGE_SIZE) {
1176 page = vm_normal_page(vma, address, *pte);
1186 if (ptep_clear_flush_young_notify(vma, address, pte))
1190 flush_cache_page(vma, address, pte_pfn(*pte));
1191 pteval = ptep_clear_flush_notify(vma, address, pte);
1194 if (page->index != linear_page_index(vma, address))
1195 set_pte_at(mm, address, pte, pgoff_to_pte(page->index));
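try_to_unmap_cluster() (lines 1134-1195) scans a fixed-size window of a nonlinear VMA at a time. Lines 1139-1142 round the scan position down to a cluster boundary and clamp it to vm_start before the per-PTE loop at line 1173. A small model of that alignment, assuming the common case where CLUSTER_SIZE works out to 32 pages of 4 KiB (the 2.6.36 definition is min(32 * PAGE_SIZE, PMD_SIZE)).

/* Model of the cluster alignment at lines 1139-1142. */
#include <stdio.h>

#define PAGE_SIZE    4096UL
#define CLUSTER_SIZE (32 * PAGE_SIZE)          /* 128 KiB, illustrative */
#define CLUSTER_MASK (~(CLUSTER_SIZE - 1))

int main(void)
{
    unsigned long vm_start = 0x403000;   /* deliberately not cluster-aligned */
    unsigned long cursor   = 0x5000;     /* scan progress within the VMA, in bytes */
    unsigned long address, end;

    /* Round down to a cluster boundary, then clamp so the loop never walks
     * below vm_start. */
    address = (vm_start + cursor) & CLUSTER_MASK;
    end = address + CLUSTER_SIZE;
    if (address < vm_start)
        address = vm_start;

    printf("scan [%#lx, %#lx)\n", address, end);
    return 0;
}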
1254 unsigned long address;
1268 address = vma_address(page, vma);
1269 if (address == -EFAULT)
1271 ret = try_to_unmap_one(page, vma, address, flags);
1309 unsigned long address = vma_address(page, vma);
1310 if (address == -EFAULT)
1312 ret = try_to_unmap_one(page, vma, address, flags);
1509 unsigned long address = vma_address(page, vma);
1510 if (address == -EFAULT)
1512 ret = rmap_one(page, vma, address, arg);
1533 unsigned long address = vma_address(page, vma);
1534 if (address == -EFAULT)
1536 ret = rmap_one(page, vma, address, arg);
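Lines 1268-1271, 1309-1312, 1509-1512 and 1533-1536 all repeat the same walk pattern: compute the page's expected address in each candidate VMA, skip the VMA when vma_address() reports -EFAULT, and otherwise hand the (page, vma, address) triple to a per-page callback such as page_referenced_one() or try_to_unmap_one(). A self-contained sketch of that loop, with simplified types and a stand-in callback:

/* Sketch of the rmap walk pattern repeated throughout this listing. */
#include <stdio.h>

#define PAGE_SHIFT 12
#define EFAULT     14

struct vma_model {
    unsigned long vm_start, vm_end, vm_pgoff;
};

static unsigned long vma_address_model(unsigned long pgoff,
                                       const struct vma_model *vma)
{
    unsigned long address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
    if (address < vma->vm_start || address >= vma->vm_end)
        return -EFAULT;
    return address;
}

/* Stand-in for the per-VMA callback (page_referenced_one, try_to_unmap_one,
 * or the rmap_one hook at lines 1512/1536): just report the visit. */
static int rmap_one_model(const struct vma_model *vma, unsigned long address)
{
    printf("visit vma [%#lx, %#lx) at %#lx\n", vma->vm_start, vma->vm_end, address);
    return 0;
}

int main(void)
{
    struct vma_model vmas[] = {
        { 0x400000, 0x500000, 16 },   /* can map the page at pgoff 20 */
        { 0x600000, 0x610000, 40 },   /* pgoff 20 falls outside this one */
    };
    unsigned long pgoff = 20;         /* the page's index into the mapping */

    for (unsigned i = 0; i < sizeof(vmas) / sizeof(vmas[0]); i++) {
        unsigned long address = vma_address_model(pgoff, &vmas[i]);
        if (address == (unsigned long)-EFAULT)   /* page not mappable here */
            continue;
        rmap_one_model(&vmas[i], address);
    }
    return 0;
}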
1570 struct vm_area_struct *vma, unsigned long address, int exclusive)
1583 page->index = linear_page_index(vma, address);
1587 struct vm_area_struct *vma, unsigned long address)
1594 BUG_ON(address < vma->vm_start || address >= vma->vm_end);
1597 __hugepage_set_anon_rmap(page, vma, address, 0);
1601 struct vm_area_struct *vma, unsigned long address)
1603 BUG_ON(address < vma->vm_start || address >= vma->vm_end);
1605 __hugepage_set_anon_rmap(page, vma, address, 1);