Lines matching defs:address in /netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/mm/

186  * At what user virtual address is page expected in vma?
192 unsigned long address;
194 address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
195 if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
200 return address;
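
The fragment above (file lines 186-200) is vma_address(): given a page's offset in its file or anon_vma, compute the user virtual address at which this vma would map it, or -EFAULT if that address falls outside the vma. A minimal sketch following mainline 2.6-era mm/rmap.c (the vendor tree may differ in detail):

    static inline unsigned long
    vma_address(struct page *page, struct vm_area_struct *vma)
    {
            /* page->index is in PAGE_CACHE_SIZE units; convert to pages. */
            pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
            unsigned long address;

            address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
            if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
                    /* The page is not mapped inside this vma's range. */
                    return -EFAULT;
            }
            return address;
    }

Note that -EFAULT travels through an unsigned long; callers compare with == -EFAULT, as the fragments at file lines 279, 424 and 664 show.
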
204 * At what user virtual address is page expected in vma? checking that the
223 * Check that @page is mapped at @address into @mm.
228 unsigned long address, spinlock_t **ptlp)
236 pgd = pgd_offset(mm, address);
240 pud = pud_offset(pgd, address);
244 pmd = pmd_offset(pud, address);
248 pte = pte_offset_map(pmd, address);
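
File lines 223-248 are the page-table walk in page_check_address(): descend pgd -> pud -> pmd -> pte for @address and return the mapped pte, locked, only if it really points at @page. A sketch under the same assumptions (mainline 2.6 behavior, split pte locks via pte_lockptr()):

    pte_t *page_check_address(struct page *page, struct mm_struct *mm,
                              unsigned long address, spinlock_t **ptlp)
    {
            pgd_t *pgd;
            pud_t *pud;
            pmd_t *pmd;
            pte_t *pte;
            spinlock_t *ptl;

            pgd = pgd_offset(mm, address);
            if (!pgd_present(*pgd))
                    return NULL;
            pud = pud_offset(pgd, address);
            if (!pud_present(*pud))
                    return NULL;
            pmd = pmd_offset(pud, address);
            if (!pmd_present(*pmd))
                    return NULL;

            pte = pte_offset_map(pmd, address);
            /* Quick check before taking the lock. */
            if (!pte_present(*pte)) {
                    pte_unmap(pte);
                    return NULL;
            }

            ptl = pte_lockptr(mm, pmd);
            spin_lock(ptl);
            if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
                    *ptlp = ptl;  /* caller unlocks via pte_unmap_unlock() */
                    return pte;
            }
            pte_unmap_unlock(pte, ptl);
            return NULL;
    }
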
273 unsigned long address;
278 address = vma_address(page, vma);
279 if (address == -EFAULT)
282 pte = page_check_address(page, mm, address, &ptl);
286 if (ptep_clear_flush_young(vma, address, pte))
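
File lines 273-286 belong to page_referenced_one(), which combines the two helpers above: resolve the address, look up the pte, then test-and-clear the hardware accessed bit. Roughly (a sketch eliding the mlock and shared-memory special cases of the full function):

    static int page_referenced_one(struct page *page,
                    struct vm_area_struct *vma, unsigned int *mapcount)
    {
            struct mm_struct *mm = vma->vm_mm;
            unsigned long address;
            pte_t *pte;
            spinlock_t *ptl;
            int referenced = 0;

            address = vma_address(page, vma);
            if (address == -EFAULT)
                    goto out;
            pte = page_check_address(page, mm, address, &ptl);
            if (!pte)
                    goto out;

            /* ptep_clear_flush_young() returns the old accessed bit. */
            if (ptep_clear_flush_young(vma, address, pte))
                    referenced++;

            (*mapcount)--;
            pte_unmap_unlock(pte, ptl);
    out:
            return referenced;
    }
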
418 unsigned long address;
423 address = vma_address(page, vma);
424 if (address == -EFAULT)
427 pte = page_check_address(page, mm, address, &ptl);
434 flush_cache_page(vma, address, pte_pfn(*pte));
435 entry = ptep_clear_flush(vma, address, pte);
438 set_pte_at(mm, address, pte, entry);
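
File lines 418-438 show the write-protect sequence in page_mkclean_one(): flush the cache for the page, atomically clear the pte (flushing the TLB), strip the dirty and writable bits, then install the cleaned entry. Approximately, assuming the mainline 2.6 shape:

    static int page_mkclean_one(struct page *page, struct vm_area_struct *vma)
    {
            struct mm_struct *mm = vma->vm_mm;
            unsigned long address;
            pte_t *pte;
            spinlock_t *ptl;
            int ret = 0;

            address = vma_address(page, vma);
            if (address == -EFAULT)
                    goto out;
            pte = page_check_address(page, mm, address, &ptl);
            if (!pte)
                    goto out;

            if (pte_dirty(*pte) || pte_write(*pte)) {
                    pte_t entry;

                    flush_cache_page(vma, address, pte_pfn(*pte));
                    entry = ptep_clear_flush(vma, address, pte);
                    entry = pte_wrprotect(entry);
                    entry = pte_mkclean(entry);
                    set_pte_at(mm, address, pte, entry);
                    ret = 1;
            }

            pte_unmap_unlock(pte, ptl);
    out:
            return ret;
    }

The ordering matters: the pte is cleared (with a TLB flush) before the read-only entry is written, so no CPU can keep dirtying the page through a stale TLB entry in between.
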
490 * @address: the user virtual address mapped
493 struct vm_area_struct *vma, unsigned long address)
501 page->index = linear_page_index(vma, address);
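
File line 501 is from __page_set_anon_rmap(), which records where an anonymous page lives so the reverse map can find it later: vma->anon_vma is tagged with PAGE_MAPPING_ANON and stored in page->mapping, and the page's offset goes into page->index. linear_page_index() is the inverse of the vma_address() arithmetic at line 194; in 2.6 it lives in include/linux/pagemap.h, roughly:

    static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
                                            unsigned long address)
    {
            pgoff_t pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
            pgoff += vma->vm_pgoff;
            return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
    }
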
514 * @address: the user virtual address mapped
517 struct vm_area_struct *vma, unsigned long address)
535 BUG_ON(page->index != linear_page_index(vma, address));
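
File line 535 is the debug cross-check in __page_check_anon_rmap(): when an already-mapped page gains another mapping, its recorded anon_vma and offset must agree with the new (vma, address) pair. A sketch, assuming CONFIG_DEBUG_VM semantics as in mainline:

    static void __page_check_anon_rmap(struct page *page,
                    struct vm_area_struct *vma, unsigned long address)
    {
    #ifdef CONFIG_DEBUG_VM
            struct anon_vma *anon_vma = vma->anon_vma;

            /* page->mapping carries the anon_vma tagged with PAGE_MAPPING_ANON. */
            anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
            BUG_ON(page->mapping != (struct address_space *) anon_vma);
            BUG_ON(page->index != linear_page_index(vma, address));
    #endif
    }
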
543 * @address: the user virtual address mapped
548 struct vm_area_struct *vma, unsigned long address)
551 VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
553 __page_set_anon_rmap(page, vma, address);
555 __page_check_anon_rmap(page, vma, address);
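
File lines 543-555 are page_add_anon_rmap(). page->_mapcount starts at -1, so atomic_inc_and_test() distinguishes the first mapper from later ones: the first sets up the anon rmap fields, later ones only verify them. A sketch:

    void page_add_anon_rmap(struct page *page,
                    struct vm_area_struct *vma, unsigned long address)
    {
            VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
            if (atomic_inc_and_test(&page->_mapcount))
                    __page_set_anon_rmap(page, vma, address);   /* first mapping */
            else
                    __page_check_anon_rmap(page, vma, address); /* already anon */
    }
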
562 * @address: the user virtual address mapped
569 struct vm_area_struct *vma, unsigned long address)
571 BUG_ON(address < vma->vm_start || address >= vma->vm_end);
573 __page_set_anon_rmap(page, vma, address);
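
File lines 562-573 are page_add_new_anon_rmap(), used when the caller has just allocated the page and holds it exclusively: there is no existing rmap state to check, so _mapcount can be set to 0 (one mapping) directly. Roughly:

    void page_add_new_anon_rmap(struct page *page,
                    struct vm_area_struct *vma, unsigned long address)
    {
            BUG_ON(address < vma->vm_start || address >= vma->vm_end);
            atomic_set(&page->_mapcount, 0);  /* _mapcount counts from -1 */
            __page_set_anon_rmap(page, vma, address);
    }
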
599 void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
603 __page_check_anon_rmap(page, vma, address);
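
File lines 599-603 are page_dup_rmap(), called at fork when copy_page_range() gives the child a duplicate of an existing mapping; the page is already mapped, so only the debug check and the counter bump remain. Approximately:

    void page_dup_rmap(struct page *page, struct vm_area_struct *vma,
                       unsigned long address)
    {
            BUG_ON(page_mapcount(page) == 0);
            if (PageAnon(page))
                    __page_check_anon_rmap(page, vma, address);
            atomic_inc(&page->_mapcount);
    }
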
657 unsigned long address;
663 address = vma_address(page, vma);
664 if (address == -EFAULT)
667 pte = page_check_address(page, mm, address, &ptl);
677 (ptep_clear_flush_young(vma, address, pte)))) {
683 flush_cache_page(vma, address, page_to_pfn(page));
684 pteval = ptep_clear_flush(vma, address, pte);
720 set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
728 set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
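
File lines 657-728 are the heart of try_to_unmap_one(). After the usual vma_address()/page_check_address() lookup (lines 663-667) and a last-chance referenced check (line 677), the pte is destroyed and, for anonymous pages, replaced by a swap entry so a later fault can bring the page back. A condensed sketch eliding the mlock, migration-entry and rss-accounting paths of the real function:

    static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                                int migration)
    {
            struct mm_struct *mm = vma->vm_mm;
            unsigned long address;
            pte_t *pte;
            pte_t pteval;
            spinlock_t *ptl;
            int ret = SWAP_AGAIN;

            address = vma_address(page, vma);
            if (address == -EFAULT)
                    goto out;
            pte = page_check_address(page, mm, address, &ptl);
            if (!pte)
                    goto out;

            /* A recently referenced page wins a reprieve (line 677). */
            if (!migration && ptep_clear_flush_young(vma, address, pte)) {
                    ret = SWAP_FAIL;
                    goto out_unmap;
            }

            /* Nuke the page table entry, flushing cache and TLB. */
            flush_cache_page(vma, address, page_to_pfn(page));
            pteval = ptep_clear_flush(vma, address, pte);

            /* Carry the hardware dirty bit over to the struct page. */
            if (pte_dirty(pteval))
                    set_page_dirty(page);

            if (PageAnon(page)) {
                    swp_entry_t entry = { .val = page_private(page) };

                    /* A swap slot was reserved earlier by add_to_swap(). */
                    swap_duplicate(entry);
                    set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
            }

            page_remove_rmap(page, vma);
            page_cache_release(page);

    out_unmap:
            pte_unmap_unlock(pte, ptl);
    out:
            return ret;
    }
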
752 * around the vma's virtual address space.
776 unsigned long address;
779 address = (vma->vm_start + cursor) & CLUSTER_MASK;
780 end = address + CLUSTER_SIZE;
781 if (address < vma->vm_start)
782 address = vma->vm_start;
786 pgd = pgd_offset(mm, address);
790 pud = pud_offset(pgd, address);
794 pmd = pmd_offset(pud, address);
798 pte = pte_offset_map_lock(mm, pmd, address, &ptl);
803 for (; address < end; pte++, address += PAGE_SIZE) {
806 page = vm_normal_page(vma, address, *pte);
809 if (ptep_clear_flush_young(vma, address, pte))
813 flush_cache_page(vma, address, pte_pfn(*pte));
814 pteval = ptep_clear_flush(vma, address, pte);
817 if (page->index != linear_page_index(vma, address))
818 set_pte_at(mm, address, pte, pgoff_to_pte(page->index));
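
File lines 752-818 are try_to_unmap_cluster(), used for nonlinear vmas (sys_remap_file_pages), where page->index no longer determines the address, so vma_address() cannot be used. Instead a CLUSTER_SIZE window of the vma is scanned, and any pte whose page sits at a non-linear offset has that offset preserved in the pte via pgoff_to_pte() (line 818). A condensed sketch after the mainline 2.6 code, including its CLUSTER_SIZE/CLUSTER_MASK definitions:

    #define CLUSTER_SIZE   min(32*PAGE_SIZE, PMD_SIZE)
    #define CLUSTER_MASK   (~(CLUSTER_SIZE - 1))

    static void try_to_unmap_cluster(unsigned long cursor,
                    unsigned int *mapcount, struct vm_area_struct *vma)
    {
            struct mm_struct *mm = vma->vm_mm;
            unsigned long address, end;
            pgd_t *pgd;
            pud_t *pud;
            pmd_t *pmd;
            pte_t *pte;
            pte_t pteval;
            spinlock_t *ptl;
            struct page *page;

            /* Round the window down to a cluster boundary, clamp to the vma. */
            address = (vma->vm_start + cursor) & CLUSTER_MASK;
            end = address + CLUSTER_SIZE;
            if (address < vma->vm_start)
                    address = vma->vm_start;
            if (end > vma->vm_end)
                    end = vma->vm_end;

            pgd = pgd_offset(mm, address);
            if (!pgd_present(*pgd))
                    return;
            pud = pud_offset(pgd, address);
            if (!pud_present(*pud))
                    return;
            pmd = pmd_offset(pud, address);
            if (!pmd_present(*pmd))
                    return;

            pte = pte_offset_map_lock(mm, pmd, address, &ptl);

            for (; address < end; pte++, address += PAGE_SIZE) {
                    if (!pte_present(*pte))
                            continue;
                    page = vm_normal_page(vma, address, *pte);
                    BUG_ON(!page || PageAnon(page));

                    /* Recently referenced? Leave it mapped. */
                    if (ptep_clear_flush_young(vma, address, pte))
                            continue;

                    flush_cache_page(vma, address, pte_pfn(*pte));
                    pteval = ptep_clear_flush(vma, address, pte);

                    /* Nonlinearly mapped: remember the file offset in the pte. */
                    if (page->index != linear_page_index(vma, address))
                            set_pte_at(mm, address, pte, pgoff_to_pte(page->index));

                    if (pte_dirty(pteval))
                            set_page_dirty(page);

                    page_remove_rmap(page, vma);
                    page_cache_release(page);
                    (*mapcount)--;
            }
            pte_unmap_unlock(pte - 1, ptl);
    }
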