Searched refs:vma (Results 101 - 125 of 1004) sorted by last modified time

/linux-master/mm/
hugetlb.c
96 static void hugetlb_vma_lock_free(struct vm_area_struct *vma);
97 static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma);
98 static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma);
99 static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
101 static struct resv_map *vma_resv_map(struct vm_area_struct *vma);
258 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma) argument
260 return subpool_inode(file_inode(vma->vm_file));
266 void hugetlb_vma_lock_read(struct vm_area_struct *vma) argument
268 if (__vma_shareable_lock(vma)) {
269 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
279 hugetlb_vma_unlock_read(struct vm_area_struct *vma) argument
292 hugetlb_vma_lock_write(struct vm_area_struct *vma) argument
305 hugetlb_vma_unlock_write(struct vm_area_struct *vma) argument
318 hugetlb_vma_trylock_write(struct vm_area_struct *vma) argument
334 hugetlb_vma_assert_locked(struct vm_area_struct *vma) argument
357 struct vm_area_struct *vma = vma_lock->vma; local
370 __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma) argument
384 hugetlb_vma_lock_free(struct vm_area_struct *vma) argument
400 hugetlb_vma_lock_alloc(struct vm_area_struct *vma) argument
990 vma_hugecache_offset(struct hstate *h, struct vm_area_struct *vma, unsigned long address) argument
1006 vma_kernel_pagesize(struct vm_area_struct *vma) argument
1020 vma_mmu_pagesize(struct vm_area_struct *vma) argument
1053 get_vma_private_data(struct vm_area_struct *vma) argument
1058 set_vma_private_data(struct vm_area_struct *vma, unsigned long value) argument
1148 vma_resv_map(struct vm_area_struct *vma) argument
1163 set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map) argument
1171 set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags) argument
1179 is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag) argument
1186 __vma_private_lock(struct vm_area_struct *vma) argument
1193 hugetlb_dup_vma_private(struct vm_area_struct *vma) argument
1224 clear_vma_resv_huge_pages(struct vm_area_struct *vma) argument
1249 vma_has_reserves(struct vm_area_struct *vma, long chg) argument
1390 dequeue_hugetlb_folio_vma(struct hstate *h, struct vm_area_struct *vma, unsigned long address, int avoid_reserve, long chg) argument
2580 alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h, struct vm_area_struct *vma, unsigned long addr) argument
2802 __vma_reservation_common(struct hstate *h, struct vm_area_struct *vma, unsigned long addr, enum vma_resv_mode mode) argument
2882 vma_needs_reservation(struct hstate *h, struct vm_area_struct *vma, unsigned long addr) argument
2888 vma_commit_reservation(struct hstate *h, struct vm_area_struct *vma, unsigned long addr) argument
2894 vma_end_reservation(struct hstate *h, struct vm_area_struct *vma, unsigned long addr) argument
2900 vma_add_reservation(struct hstate *h, struct vm_area_struct *vma, unsigned long addr) argument
2906 vma_del_reservation(struct hstate *h, struct vm_area_struct *vma, unsigned long addr) argument
2932 restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma, unsigned long address, struct folio *folio) argument
3132 alloc_hugetlb_folio(struct vm_area_struct *vma, unsigned long addr, int avoid_reserve) argument
5173 hugetlb_vm_op_open(struct vm_area_struct *vma) argument
5211 hugetlb_vm_op_close(struct vm_area_struct *vma) argument
5242 hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr) argument
5268 hugetlb_vm_op_pagesize(struct vm_area_struct *vma) argument
5300 make_huge_pte(struct vm_area_struct *vma, struct page *page, int writable) argument
5319 set_huge_ptep_writable(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) argument
5356 hugetlb_install_folio(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr, struct folio *new_folio, pte_t old, unsigned long sz) argument
5559 move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr, unsigned long new_addr, pte_t *src_pte, pte_t *dst_pte, unsigned long sz) argument
5586 move_hugetlb_page_tables(struct vm_area_struct *vma, struct vm_area_struct *new_vma, unsigned long old_addr, unsigned long new_addr, unsigned long len) argument
5650 __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long start, unsigned long end, struct page *ref_page, zap_flags_t zap_flags) argument
5806 __hugetlb_zap_begin(struct vm_area_struct *vma, unsigned long *start, unsigned long *end) argument
5818 __hugetlb_zap_end(struct vm_area_struct *vma, struct zap_details *details) argument
5845 unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, unsigned long end, struct page *ref_page, zap_flags_t zap_flags) argument
5870 unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma, struct page *page, unsigned long address) argument
5926 hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pte_t *ptep, unsigned int flags, struct folio *pagecache_folio, spinlock_t *ptl, struct vm_fault *vmf) argument
6119 hugetlbfs_pagecache_present(struct hstate *h, struct vm_area_struct *vma, unsigned long address) argument
6196 hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma, struct address_space *mapping, pgoff_t idx, unsigned long address, pte_t *ptep, pte_t old_pte, unsigned int flags, struct vm_fault *vmf) argument
6432 hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, unsigned int flags) argument
6635 alloc_hugetlb_folio_vma(struct hstate *h, struct vm_area_struct *vma, unsigned long address) argument
6876 hugetlb_follow_page_mask(struct vm_area_struct *vma, unsigned long address, unsigned int flags, unsigned int *page_mask) argument
6947 hugetlb_change_protection(struct vm_area_struct *vma, unsigned long address, unsigned long end, pgprot_t newprot, unsigned long cp_flags) argument
7095 hugetlb_reserve_pages(struct inode *inode, long from, long to, struct vm_area_struct *vma, vm_flags_t vm_flags) argument
7297 page_table_shareable(struct vm_area_struct *svma, struct vm_area_struct *vma, unsigned long addr, pgoff_t idx) argument
7325 want_pmd_share(struct vm_area_struct *vma, unsigned long addr) argument
7351 adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, unsigned long *start, unsigned long *end) argument
7382 huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, pud_t *pud) argument
7439 huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) argument
7460 huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, pud_t *pud) argument
7466 huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) argument
7472 adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, unsigned long *start, unsigned long *end) argument
7477 want_pmd_share(struct vm_area_struct *vma, unsigned long addr) argument
7484 huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long sz) argument
7689 hugetlb_unshare_pmds(struct vm_area_struct *vma, unsigned long start, unsigned long end) argument
7739 hugetlb_unshare_all_pmds(struct vm_area_struct *vma) argument
[all...]
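
The hugetlb lock helpers above come in read/write pairs. A minimal read-side sketch, assuming a hugetlb-backed vma obtained elsewhere (the function and its caller are hypothetical; the lock API is taken from the signatures at 266/279):

    #include <linux/hugetlb.h>

    /* Hold the hugetlb VMA lock for reading around a page-table walk
     * that must not race with PMD unsharing, then drop it. */
    static void example_hugetlb_walk(struct vm_area_struct *vma)
    {
            hugetlb_vma_lock_read(vma);
            /* ... inspect this hugetlb vma's page tables ... */
            hugetlb_vma_unlock_read(vma);
    }
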
memory-failure.c
388 static unsigned long dev_pagemap_mapping_shift(struct vm_area_struct *vma, argument
399 VM_BUG_ON_VMA(address == -EFAULT, vma);
400 pgd = pgd_offset(vma->vm_mm, address);
444 struct vm_area_struct *vma, struct list_head *to_kill,
455 tk->addr = ksm_addr ? ksm_addr : page_address_in_vma(p, vma);
458 tk->addr = vma_pgoff_address(fsdax_pgoff, 1, vma);
459 tk->size_shift = dev_pagemap_mapping_shift(vma, tk->addr);
487 struct vm_area_struct *vma,
490 __add_to_kill(tsk, p, vma, to_kill, 0, FSDAX_INVALID_PGOFF);
507 struct vm_area_struct *vma, struct list_head *to_kill,
443 __add_to_kill(struct task_struct *tsk, struct page *p, struct vm_area_struct *vma, struct list_head *to_kill, unsigned long ksm_addr, pgoff_t fsdax_pgoff) argument
486 add_to_kill_anon_file(struct task_struct *tsk, struct page *p, struct vm_area_struct *vma, struct list_head *to_kill) argument
506 add_to_kill_ksm(struct task_struct *tsk, struct page *p, struct vm_area_struct *vma, struct list_head *to_kill, unsigned long ksm_addr) argument
613 struct vm_area_struct *vma; local
650 struct vm_area_struct *vma; local
681 add_to_kill_fsdax(struct task_struct *tsk, struct page *p, struct vm_area_struct *vma, struct list_head *to_kill, pgoff_t pgoff) argument
695 struct vm_area_struct *vma; local
[all...]
madvise.c
46 * Any behaviour which results in changes to the vma->vm_flags needs to
94 struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma) argument
96 mmap_assert_locked(vma->vm_mm);
98 return vma->anon_name;
102 static int replace_anon_vma_name(struct vm_area_struct *vma, argument
105 struct anon_vma_name *orig_name = anon_vma_name(vma);
108 vma->anon_name = NULL;
116 vma->anon_name = anon_vma_name_reuse(anon_name);
122 static int replace_anon_vma_name(struct vm_area_struct *vma, argument
132 * Update the vm_flags on region of a vma, splitting it or merging it as
137 madvise_update_vma(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end, unsigned long new_flags, struct anon_vma_name *anon_name) argument
174 struct vm_area_struct *vma = walk->private; local
220 shmem_swapin_range(struct vm_area_struct *vma, unsigned long start, unsigned long end, struct address_space *mapping) argument
261 madvise_willneed(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end) argument
309 can_do_file_pageout(struct vm_area_struct *vma) argument
332 struct vm_area_struct *vma = walk->vma; local
538 madvise_cold_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long addr, unsigned long end) argument
552 can_madv_lru_vma(struct vm_area_struct *vma) argument
557 madvise_cold(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start_addr, unsigned long end_addr) argument
576 madvise_pageout_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long addr, unsigned long end) argument
590 madvise_pageout(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start_addr, unsigned long end_addr) argument
625 struct vm_area_struct *vma = walk->vma; local
759 madvise_free_single_vma(struct vm_area_struct *vma, unsigned long start_addr, unsigned long end_addr) argument
813 madvise_dontneed_single_vma(struct vm_area_struct *vma, unsigned long start, unsigned long end) argument
820 madvise_dontneed_free_valid_vma(struct vm_area_struct *vma, unsigned long start, unsigned long *end, int behavior) argument
850 madvise_dontneed_free(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end, int behavior) argument
904 madvise_populate(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end, int behavior) argument
952 madvise_remove(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end) argument
1002 madvise_vma_behavior(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end, unsigned long behavior) argument
1207 madvise_walk_vmas(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long arg, int (*visit)(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end, unsigned long arg)) argument
1213 struct vm_area_struct *vma; local
1266 madvise_vma_anon_name(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end, unsigned long anon_name) argument
[all...]
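
madvise_walk_vmas() at 1207 above drives a visitor callback over every vma in [start, end). A sketch of a visitor with the expected shape, assuming `arg` carries a required vm_flags mask (the helper name and the policy check are made up):

    #include <linux/mm.h>

    /* Visitor matching the madvise_walk_vmas() callback signature. */
    static int example_visit(struct vm_area_struct *vma,
                             struct vm_area_struct **prev,
                             unsigned long start, unsigned long end,
                             unsigned long arg)
    {
            *prev = vma;    /* this visitor never splits or merges */
            if ((vma->vm_flags & arg) != arg)
                    return -EINVAL; /* reject vmas missing required flags */
            return 0;
    }
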
internal.h
213 struct vm_area_struct *vma,
687 extern long populate_vma_page_range(struct vm_area_struct *vma,
710 folio_within_range(struct folio *folio, struct vm_area_struct *vma, argument
714 unsigned long vma_pglen = vma_pages(vma);
720 if (start < vma->vm_start)
721 start = vma->vm_start;
723 if (end > vma->vm_end)
724 end = vma->vm_end;
728 /* if folio start address is not in vma range */
729 if (!in_range(pgoff, vma->vm_pgoff, vma_pglen))
738 folio_within_vma(struct folio *folio, struct vm_area_struct *vma) argument
753 mlock_vma_folio(struct folio *folio, struct vm_area_struct *vma) argument
769 munlock_vma_folio(struct folio *folio, struct vm_area_struct *vma) argument
797 vma_pgoff_address(pgoff_t pgoff, unsigned long nr_pages, struct vm_area_struct *vma) argument
823 vma_address(struct page *page, struct vm_area_struct *vma) argument
835 struct vm_area_struct *vma = pvmw->vma; local
1157 gup_must_unshare(struct vm_area_struct *vma, unsigned int flags, struct page *page) argument
1215 vma_set_range(struct vm_area_struct *vma, unsigned long start, unsigned long end, pgoff_t pgoff) argument
1224 vma_soft_dirty_enabled(struct vm_area_struct *vma) argument
1251 vma_iter_prealloc(struct vma_iterator *vmi, struct vm_area_struct *vma) argument
1268 vma_iter_store(struct vma_iterator *vmi, struct vm_area_struct *vma) argument
1295 vma_iter_store_gfp(struct vma_iterator *vmi, struct vm_area_struct *vma, gfp_t gfp) argument
1314 struct vm_area_struct *vma; member in struct:vma_prepare
[all...]
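
folio_within_range() at 710 above first clamps the queried range to the vma (lines 720-724) before comparing page offsets. The clamp step in isolation, as a hypothetical helper:

    #include <linux/mm.h>

    /* Restrict [*start, *end) to the part covered by the vma. */
    static void clamp_range_to_vma(struct vm_area_struct *vma,
                                   unsigned long *start, unsigned long *end)
    {
            if (*start < vma->vm_start)
                    *start = vma->vm_start;
            if (*end > vma->vm_end)
                    *end = vma->vm_end;
    }
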
huge_memory.c
82 unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma, argument
88 orders &= vma_is_anonymous(vma) ?
93 if (!vma->vm_mm) /* vdso */
102 test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
110 /* khugepaged doesn't collapse DAX vma, but page fault is fine. */
111 if (vma_is_dax(vma))
123 * Check alignment for file vma and size for both file and anon vma by
134 addr = vma->vm_end - (PAGE_SIZE << order);
135 if (thp_vma_suitable_order(vma, addr, order))
762 maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma) argument
871 struct vm_area_struct *vma = vmf->vma; local
955 vma_thp_gfp_mask(struct vm_area_struct *vma) argument
982 set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm, struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd, struct page *zero_page) argument
998 struct vm_area_struct *vma = vmf->vma; local
1057 insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write, pgtable_t pgtable) argument
1117 struct vm_area_struct *vma = vmf->vma; local
1149 maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma) argument
1156 insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr, pud_t *pud, pfn_t pfn, bool write) argument
1206 struct vm_area_struct *vma = vmf->vma; local
1231 touch_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd, bool write) argument
1244 follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap) argument
1388 touch_pud(struct vm_area_struct *vma, unsigned long addr, pud_t *pud, bool write) argument
1401 follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr, pud_t *pud, int flags, struct dev_pagemap **pgmap) argument
1444 copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm, pud_t *dst_pud, pud_t *src_pud, unsigned long addr, struct vm_area_struct *vma) argument
1516 struct vm_area_struct *vma = vmf->vma; local
1600 can_change_pmd_writable(struct vm_area_struct *vma, unsigned long addr, pmd_t pmd) argument
1631 can_follow_write_pmd(pmd_t pmd, struct page *page, struct vm_area_struct *vma, unsigned int flags) argument
1668 follow_trans_huge_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd, unsigned int flags) argument
1715 struct vm_area_struct *vma = vmf->vma; local
1802 madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long next) argument
1881 zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr) argument
1948 pmd_move_must_withdraw(spinlock_t *new_pmd_ptl, spinlock_t *old_pmd_ptl, struct vm_area_struct *vma) argument
1973 move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd) argument
2029 change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, pgprot_t newprot, unsigned long cp_flags) argument
2307 __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma) argument
2324 __pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma) argument
2336 zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud, unsigned long addr) argument
2357 __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud, unsigned long haddr) argument
2370 __split_huge_pud(struct vm_area_struct *vma, pud_t *pud, unsigned long address) argument
2391 __split_huge_zero_page_pmd(struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd) argument
2432 __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, unsigned long haddr, bool freeze) argument
2649 __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long address, bool freeze, struct folio *folio) argument
2684 split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address, bool freeze, struct folio *folio) argument
2695 split_huge_pmd_if_needed(struct vm_area_struct *vma, unsigned long address) argument
2707 vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start, unsigned long end, long adjust_next) argument
3419 vma_not_suitable_for_thp_split(struct vm_area_struct *vma) argument
3466 struct vm_area_struct *vma = vma_lookup(mm, addr); local
3677 struct vm_area_struct *vma = pvmw->vma; local
3726 struct vm_area_struct *vma = pvmw->vma; local
[all...]
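
__thp_vma_allowable_orders() at 82 above begins with a few cheap gates before any per-order checks. A condensed sketch of those gates (the wrapper name is hypothetical, and the real code still allows DAX vmas on the page-fault path, per the comment at 110):

    #include <linux/mm.h>
    #include <linux/sched/coredump.h>

    /* Return the THP orders still permitted after the early vma gates. */
    static unsigned long example_thp_gate(struct vm_area_struct *vma,
                                          unsigned long orders)
    {
            if (!vma->vm_mm)        /* vdso has no mm */
                    return 0;
            if (test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
                    return 0;
            if (vma_is_dax(vma))    /* khugepaged won't collapse DAX vmas */
                    return 0;
            return orders;
    }
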
gup.c
503 static struct page *no_page_table(struct vm_area_struct *vma, argument
515 (vma_is_anonymous(vma) || !vma->vm_ops->fault))
520 static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address, argument
532 set_pte_at(vma->vm_mm, address, pte, entry);
533 update_mmu_cache(vma, address, pte);
543 struct vm_area_struct *vma,
555 if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
559 if (!(vma->vm_flags & VM_MAYWRITE))
563 if (vma
542 can_follow_write_pte(pte_t pte, struct page *page, struct vm_area_struct *vma, unsigned int flags) argument
579 follow_page_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd, unsigned int flags, struct dev_pagemap **pgmap) argument
691 follow_pmd_mask(struct vm_area_struct *vma, unsigned long address, pud_t *pudp, unsigned int flags, struct follow_page_context *ctx) argument
743 follow_pud_mask(struct vm_area_struct *vma, unsigned long address, p4d_t *p4dp, unsigned int flags, struct follow_page_context *ctx) argument
770 follow_p4d_mask(struct vm_area_struct *vma, unsigned long address, pgd_t *pgdp, unsigned int flags, struct follow_page_context *ctx) argument
811 follow_page_mask(struct vm_area_struct *vma, unsigned long address, unsigned int flags, struct follow_page_context *ctx) argument
837 follow_page(struct vm_area_struct *vma, unsigned long address, unsigned int foll_flags) argument
859 get_gate_page(struct mm_struct *mm, unsigned long address, unsigned int gup_flags, struct vm_area_struct **vma, struct page **page) argument
919 faultin_page(struct vm_area_struct *vma, unsigned long address, unsigned int *flags, bool unshare, int *locked) argument
1014 writable_file_mapping_allowed(struct vm_area_struct *vma, unsigned long gup_flags) argument
1032 check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags) argument
1192 struct vm_area_struct *vma = NULL; local
1345 vma_permits_fault(struct vm_area_struct *vma, unsigned int fault_flags) argument
1401 struct vm_area_struct *vma; local
1650 populate_vma_page_range(struct vm_area_struct *vma, unsigned long start, unsigned long end, int *locked) argument
1766 struct vm_area_struct *vma = NULL; local
1820 struct vm_area_struct *vma; local
[all...]
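
follow_page() at 837 above resolves one user address within a known vma. A sketch of a lookup under the mmap read lock, assuming the caller owns a reference to mm (names and flag choice are illustrative):

    #include <linux/mm.h>

    /* Peek at the page backing addr; FOLL_GET takes a reference that
     * the caller must drop with put_page(). May return NULL. */
    static struct page *example_peek_page(struct mm_struct *mm,
                                          unsigned long addr)
    {
            struct vm_area_struct *vma;
            struct page *page = NULL;

            mmap_read_lock(mm);
            vma = vma_lookup(mm, addr);
            if (vma)
                    page = follow_page(vma, addr, FOLL_GET);
            mmap_read_unlock(mm);
            return page;
    }
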
/linux-master/include/linux/
mm.h
184 * per a vma. In ELF, the number of sections is represented in unsigned short.
251 void __vm_area_free(struct vm_area_struct *vma);
294 #define VM_DONTCOPY 0x00020000 /* Do not copy this vma on fork */
312 #define VM_HUGEPAGE 0x20000000 /* MADV_HUGEPAGE marked this vma */
313 #define VM_NOHUGEPAGE 0x40000000 /* MADV_NOHUGEPAGE marked this vma */
511 * vm_fault is filled by the pagefault handler and passed to the vma's
512 * ->fault function. The vma's ->fault is responsible for returning a bitmask
522 struct vm_area_struct *vma; /* Target VMA */ member in struct:vm_fault::__anon82
524 pgoff_t pgoff; /* Logical page offset based on vma */
586 int (*mprotect)(struct vm_area_struct *vma, unsigned long start,
646 vma_numab_state_init(struct vm_area_struct *vma) argument
650 vma_numab_state_free(struct vm_area_struct *vma) argument
655 vma_numab_state_init(struct vm_area_struct *vma) argument
656 vma_numab_state_free(struct vm_area_struct *vma) argument
665 vma_start_read(struct vm_area_struct *vma) argument
698 vma_end_read(struct vm_area_struct *vma) argument
706 __is_vma_write_locked(struct vm_area_struct *vma, int *mm_lock_seq) argument
723 vma_start_write(struct vm_area_struct *vma) argument
741 vma_assert_write_locked(struct vm_area_struct *vma) argument
748 vma_assert_locked(struct vm_area_struct *vma) argument
754 vma_mark_detached(struct vm_area_struct *vma, bool detached) argument
783 vma_start_read(struct vm_area_struct *vma) argument
785 vma_end_read(struct vm_area_struct *vma) argument
786 vma_start_write(struct vm_area_struct *vma) argument
787 vma_assert_write_locked(struct vm_area_struct *vma) argument
789 vma_mark_detached(struct vm_area_struct *vma, bool detached) argument
798 vma_assert_locked(struct vm_area_struct *vma) argument
821 vma_init(struct vm_area_struct *vma, struct mm_struct *mm) argument
832 vm_flags_init(struct vm_area_struct *vma, vm_flags_t flags) argument
843 vm_flags_reset(struct vm_area_struct *vma, vm_flags_t flags) argument
850 vm_flags_reset_once(struct vm_area_struct *vma, vm_flags_t flags) argument
857 vm_flags_set(struct vm_area_struct *vma, vm_flags_t flags) argument
864 vm_flags_clear(struct vm_area_struct *vma, vm_flags_t flags) argument
875 __vm_flags_mod(struct vm_area_struct *vma, vm_flags_t set, vm_flags_t clear) argument
885 vm_flags_mod(struct vm_area_struct *vma, vm_flags_t set, vm_flags_t clear) argument
892 vma_set_anonymous(struct vm_area_struct *vma) argument
897 vma_is_anonymous(struct vm_area_struct *vma) argument
906 vma_is_initial_heap(const struct vm_area_struct *vma) argument
916 vma_is_initial_stack(const struct vm_area_struct *vma) argument
927 vma_is_temporary_stack(struct vm_area_struct *vma) argument
941 vma_is_foreign(struct vm_area_struct *vma) argument
952 vma_is_accessible(struct vm_area_struct *vma) argument
963 vma_is_shared_maywrite(struct vm_area_struct *vma) argument
1033 vma_iter_bulk_store(struct vma_iterator *vmi, struct vm_area_struct *vma) argument
1070 vma_is_shmem(struct vm_area_struct *vma) argument
1071 vma_is_anon_shmem(struct vm_area_struct *vma) argument
1362 maybe_mkwrite(pte_t pte, struct vm_area_struct *vma) argument
1759 vma_set_access_pid_bit(struct vm_area_struct *vma) argument
1818 vma_set_access_pid_bit(struct vm_area_struct *vma) argument
1982 folio_needs_cow_for_dma(struct vm_area_struct *vma, struct folio *folio) argument
2381 zap_vma_pages(struct vm_area_struct *vma) argument
2427 handle_mm_fault(struct vm_area_struct *vma, unsigned long address, unsigned int flags, struct pt_regs *regs) argument
2480 struct vm_area_struct *vma; local
2556 vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma) argument
3299 vma_modify_flags(struct vma_iterator *vmi, struct vm_area_struct *prev, struct vm_area_struct *vma, unsigned long start, unsigned long end, unsigned long new_flags) argument
3312 vma_modify_flags_name(struct vma_iterator *vmi, struct vm_area_struct *prev, struct vm_area_struct *vma, unsigned long start, unsigned long end, unsigned long new_flags, struct anon_vma_name *new_name) argument
3326 vma_modify_policy(struct vma_iterator *vmi, struct vm_area_struct *prev, struct vm_area_struct *vma, unsigned long start, unsigned long end, struct mempolicy *new_pol) argument
3338 vma_modify_flags_uffd(struct vma_iterator *vmi, struct vm_area_struct *prev, struct vm_area_struct *vma, unsigned long start, unsigned long end, unsigned long new_flags, struct vm_userfaultfd_ctx new_ctx) argument
3483 stack_guard_start_gap(struct vm_area_struct *vma) argument
3495 vm_start_gap(struct vm_area_struct *vma) argument
3506 vm_end_gap(struct vm_area_struct *vma) argument
3518 vma_pages(struct vm_area_struct *vma) argument
3527 struct vm_area_struct *vma = vma_lookup(mm, vm_start); local
3535 range_in_vma(struct vm_area_struct *vma, unsigned long start, unsigned long end) argument
3549 vma_set_page_prot(struct vm_area_struct *vma) argument
3585 vmf_insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *page) argument
3599 io_remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn, unsigned long size, pgprot_t prot) argument
3652 gup_can_follow_protnone(struct vm_area_struct *vma, unsigned int flags) argument
4095 vma_is_special_huge(const struct vm_area_struct *vma) argument
4145 seal_check_write(int seals, struct vm_area_struct *vma) argument
[all...]
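
The vm_flags_*() helpers at 832-885 above replace direct writes to vma->vm_flags so that modifications happen under the vma write lock. A sketch of their use in a hypothetical driver mmap hook:

    #include <linux/mm.h>

    /* Pin down a device mapping: it may not grow via mremap() and is
     * excluded from core dumps. */
    static int example_mmap(struct file *file, struct vm_area_struct *vma)
    {
            vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
            vm_flags_clear(vma, VM_MAYEXEC);
            return 0;
    }
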
swapops.h
337 extern void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte);
361 static inline void migration_entry_wait_huge(struct vm_area_struct *vma, argument
shmem_fs.h
125 extern unsigned long shmem_swap_usage(struct vm_area_struct *vma);
127 static inline unsigned long shmem_swap_usage(struct vm_area_struct *vma) argument
/linux-master/fs/ntfs3/
file.c
254 static int ntfs_file_mmap(struct file *file, struct vm_area_struct *vma) argument
259 u64 from = ((u64)vma->vm_pgoff << PAGE_SHIFT);
260 bool rw = vma->vm_flags & VM_WRITE;
283 from + vma->vm_end - vma->vm_start);
314 err = generic_file_mmap(file, vma);
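
ntfs_file_mmap() above converts the vma to a byte range in the file: vm_pgoff counts pages from the start of the file, and vm_end - vm_start is the mapping length. The same arithmetic as a hypothetical helper:

    #include <linux/mm.h>

    /* File offset one past the last byte covered by the mapping. */
    static u64 example_vma_file_end(struct vm_area_struct *vma)
    {
            u64 from = (u64)vma->vm_pgoff << PAGE_SHIFT;

            return from + (vma->vm_end - vma->vm_start);
    }
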
/linux-master/drivers/video/fbdev/core/
fb_defio.c
98 struct fb_info *info = vmf->vma->vm_private_data;
110 if (vmf->vma->vm_file)
111 page->mapping = vmf->vma->vm_file->f_mapping;
202 file_update_time(vmf->vma->vm_file);
210 struct fb_info *info = vmf->vma->vm_private_data;
224 int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma) argument
226 vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
228 vma->vm_ops = &fb_deferred_io_vm_ops;
229 vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
[all...]
/linux-master/drivers/gpu/drm/amd/amdkfd/
kfd_svm.c
1664 struct vm_area_struct *vma; local
1670 vma = vma_lookup(mm, addr);
1671 if (vma) {
1672 readonly = !(vma->vm_flags & VM_WRITE);
1674 next = min(vma->vm_end, end);
2657 struct vm_area_struct *vma; local
2662 vma = vma_lookup(p->mm, addr << PAGE_SHIFT);
2663 if (!vma) {
2668 *is_heap_stack = vma_is_initial_heap(vma) || vma_is_initial_stack(vma);
2873 svm_fault_allowed(struct vm_area_struct *vma, bool write_fault) argument
2900 struct vm_area_struct *vma; local
3275 struct vm_area_struct *vma; local
[all...]
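
The lookup at kfd_svm.c:1670-1674 above is the standard pattern for covering a user range vma by vma. A sketch of the loop, stopping at the first hole (names are mine; the caller must hold the mmap read lock):

    #include <linux/mm.h>

    /* Walk [addr, end) one vma at a time. */
    static void example_walk_range(struct mm_struct *mm, unsigned long addr,
                                   unsigned long end)
    {
            while (addr < end) {
                    struct vm_area_struct *vma = vma_lookup(mm, addr);
                    unsigned long next;

                    if (!vma)
                            break;  /* hole: no vma maps addr */
                    next = min(vma->vm_end, end);
                    /* ... process [addr, next); vma->vm_flags says how ... */
                    addr = next;
            }
    }
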
kfd_migrate.c
243 svm_migrate_get_sys_page(struct vm_area_struct *vma, unsigned long addr) argument
247 page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
382 struct vm_area_struct *vma, uint64_t start,
398 migrate.vma = vma;
421 dev_err(adev->dev, "%s: vma setup fail %d range [0x%lx 0x%lx]\n",
487 struct vm_area_struct *vma; local
531 vma = vma_lookup(mm, addr);
532 if (!vma)
535 next = min(vma->vm_end, end);
381 svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange, struct vm_area_struct *vma, uint64_t start, uint64_t end, uint32_t trigger, uint64_t ttm_res_offset) argument
680 svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange, struct vm_area_struct *vma, uint64_t start, uint64_t end, uint32_t trigger, struct page *fault_page) argument
792 struct vm_area_struct *vma; local
[all...]
/linux-master/arch/loongarch/mm/
fault.c
141 struct vm_area_struct *vma = NULL; local
178 vma = lock_mm_and_find_vma(mm, address, regs);
179 if (unlikely(!vma))
202 if (!(vma->vm_flags & VM_WRITE))
205 if (!(vma->vm_flags & VM_EXEC) && address == exception_era(regs))
207 if (!(vma->vm_flags & (VM_READ | VM_WRITE)) && address != exception_era(regs))
216 fault = handle_mm_fault(vma, address, flags, regs);
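
The checks at fault.c:202-207 above reject a fault whose access type the vma does not permit before calling handle_mm_fault(). Reduced to a predicate (the exception_era() special case for LoongArch instruction fetches is left out):

    #include <linux/mm.h>

    /* Does the vma allow this kind of access? */
    static bool example_access_ok(struct vm_area_struct *vma,
                                  bool write, bool exec)
    {
            if (write)
                    return vma->vm_flags & VM_WRITE;
            if (exec)
                    return vma->vm_flags & VM_EXEC;
            return vma->vm_flags & (VM_READ | VM_WRITE);
    }
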
/linux-master/arch/loongarch/include/asm/
tlb.h
142 struct vm_area_struct vma; local
144 vma.vm_mm = tlb->mm;
145 vm_flags_init(&vma, 0);
151 flush_tlb_range(&vma, tlb->start, tlb->end);
/linux-master/arch/arc/mm/
tlb.c
190 * (Android Binder ended up calling this for vma->mm != tsk->mm,
208 void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, argument
222 local_flush_tlb_mm(vma->vm_mm);
235 if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
237 tlb_entry_erase(start | hw_pid(vma->vm_mm, cpu));
278 void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) argument
288 if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
289 tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm, cpu));
344 void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) argument
347 .ta_vma = vma,
354 flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) argument
367 flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) argument
394 create_tlb(struct vm_area_struct *vma, unsigned long vaddr, pte_t *ptep) argument
472 update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma, unsigned long vaddr_unaligned, pte_t *ptep, unsigned int nr) argument
530 update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd) argument
537 local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) argument
[all...]
/linux-master/arch/arc/include/asm/
pgtable-bits-arcv2.h
104 void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
107 #define update_mmu_cache(vma, addr, ptep) \
108 update_mmu_cache_range(NULL, vma, addr, ptep, 1)
/linux-master/virt/kvm/
kvm_main.c
2645 struct vm_area_struct *vma;
2655 vma = find_vma(current->mm, addr);
2656 if (!vma)
2659 size = vma_kernel_pagesize(vma);
2834 static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
2836 if (unlikely(!(vma->vm_flags & VM_READ)))
2839 if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE))))
2855 static int hva_to_pfn_remapped(struct vm_area_struct *vma,
2865 r = follow_pte(vma->vm_mm, addr, &ptep, &ptl);
2880 r = follow_pte(vma->vm_mm, addr, &ptep, &ptl);
2684 struct vm_area_struct *vma; local
2873 vma_is_valid(struct vm_area_struct *vma, bool write_fault) argument
2894 hva_to_pfn_remapped(struct vm_area_struct *vma, unsigned long addr, bool write_fault, bool *writable, kvm_pfn_t *p_pfn) argument
2980 struct vm_area_struct *vma; local
4169 kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma) argument
4671 kvm_device_mmap(struct file *filp, struct vm_area_struct *vma) argument
[all...]
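
kvm_main.c:2655-2659 above maps a host virtual address to the page size of its backing vma. A sketch of the same lookup, assuming the caller may sleep (helper name is mine):

    #include <linux/mm.h>
    #include <linux/hugetlb.h>

    /* Page size backing hva, defaulting to PAGE_SIZE if unmapped. */
    static unsigned long example_host_page_size(struct mm_struct *mm,
                                                unsigned long hva)
    {
            struct vm_area_struct *vma;
            unsigned long size = PAGE_SIZE;

            mmap_read_lock(mm);
            vma = find_vma(mm, hva);
            if (vma)
                    size = vma_kernel_pagesize(vma);
            mmap_read_unlock(mm);
            return size;
    }
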
/linux-master/fs/sysfs/
file.c
162 struct vm_area_struct *vma)
167 return battr->mmap(of->file, kobj, battr, vma);
161 sysfs_kf_bin_mmap(struct kernfs_open_file *of, struct vm_area_struct *vma) argument
/linux-master/drivers/android/
binder.c
3245 s = (ret == -ESRCH) ? ": vma cleared, target dead or dying"
5563 static void binder_vma_open(struct vm_area_struct *vma) argument
5565 struct binder_proc *proc = vma->vm_private_data;
5568 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5569 proc->pid, vma->vm_start, vma->vm_end,
5570 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5571 (unsigned long)pgprot_val(vma->vm_page_prot));
5574 binder_vma_close(struct vm_area_struct *vma) argument
5597 binder_mmap(struct file *filp, struct vm_area_struct *vma) argument
[all...]
/linux-master/kernel/
fork.c
439 static bool vma_lock_alloc(struct vm_area_struct *vma) argument
441 vma->vm_lock = kmem_cache_alloc(vma_lock_cachep, GFP_KERNEL);
442 if (!vma->vm_lock)
445 init_rwsem(&vma->vm_lock->lock);
446 vma->vm_lock_seq = -1;
451 static inline void vma_lock_free(struct vm_area_struct *vma) argument
453 kmem_cache_free(vma_lock_cachep, vma->vm_lock);
458 static inline bool vma_lock_alloc(struct vm_area_struct *vma) { return true; } argument
459 static inline void vma_lock_free(struct vm_area_struct *vma) {} argument
465 struct vm_area_struct *vma; local
505 __vm_area_free(struct vm_area_struct *vma) argument
516 struct vm_area_struct *vma = container_of(head, struct vm_area_struct, local
525 vm_area_free(struct vm_area_struct *vma) argument
1443 struct vm_area_struct *vma; local
[all...]
/linux-master/fs/fuse/
file.c
2487 static void fuse_vma_close(struct vm_area_struct *vma) argument
2491 err = write_inode_now(vma->vm_file->f_mapping->host, 1);
2492 mapping_set_error(vma->vm_file->f_mapping, err);
2513 struct inode *inode = file_inode(vmf->vma->vm_file);
2515 file_update_time(vmf->vma->vm_file);
2533 static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma) argument
2542 return fuse_dax_mmap(file, vma);
2550 return fuse_passthrough_mmap(file, vma);
2563 if ((vma->vm_flags & VM_MAYSHARE) && !fc->direct_io_allow_mmap)
2568 if (!(vma
[all...]
fuse_i.h
1376 int fuse_dax_mmap(struct file *file, struct vm_area_struct *vma);
1473 ssize_t fuse_passthrough_mmap(struct file *file, struct vm_area_struct *vma);
/linux-master/fs/9p/
vfs_file.c
457 v9fs_file_mmap(struct file *filp, struct vm_area_struct *vma) argument
467 return generic_file_readonly_mmap(filp, vma);
470 retval = generic_file_mmap(filp, vma);
472 vma->vm_ops = &v9fs_mmap_file_vm_ops;
483 static void v9fs_mmap_vm_close(struct vm_area_struct *vma) argument
490 .range_start = (loff_t)vma->vm_pgoff * PAGE_SIZE,
492 .range_end = (loff_t)vma->vm_pgoff * PAGE_SIZE +
493 (vma->vm_end - vma->vm_start - 1),
496 if (!(vma->vm_flags & VM_SHARED))
[all...]
/linux-master/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/
nv50.c
185 struct nvkm_vma *vma, void *argv, u32 argc)
188 return nvkm_memory_map(memory, offset, vmm, vma, argv, argc);
184 nv50_instobj_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm, struct nvkm_vma *vma, void *argv, u32 argc) argument

Completed in 378 milliseconds
