Searched refs:vma (Results 26 - 50 of 1003) sorted by relevance


/linux-master/arch/mips/include/asm/
tlbflush.h
12 * - flush_tlb_page(vma, vmaddr) flushes one page
13 * - flush_tlb_range(vma, start, end) flushes a range of pages
17 extern void local_flush_tlb_range(struct vm_area_struct *vma,
21 extern void local_flush_tlb_page(struct vm_area_struct *vma,
31 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long,
41 #define flush_tlb_range(vma, vmaddr, end) local_flush_tlb_range(vma, vmaddr, end)
44 #define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page)
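
Aside: the comments above state the generic TLB-flush contract: flush_tlb_page() invalidates the translation for a single page, flush_tlb_range() a span, after the page tables under a vma have been changed. A minimal sketch of a hypothetical caller, using only the signatures shown in this listing:

/* Hypothetical caller: page table entries for [start, end) in vma
 * were just modified; invalidate the stale TLB entries. */
static void example_flush_after_pte_update(struct vm_area_struct *vma,
                                           unsigned long start,
                                           unsigned long end)
{
        if (end - start == PAGE_SIZE)
                flush_tlb_page(vma, start);       /* one page */
        else
                flush_tlb_range(vma, start, end); /* a range of pages */
}
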
/linux-master/arch/loongarch/include/asm/
tlbflush.h
15 * - flush_tlb_page(vma, vmaddr) flushes one page
16 * - flush_tlb_range(vma, start, end) flushes a range of pages
23 extern void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
25 extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
32 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long, unsigned long);
41 #define flush_tlb_range(vma, vmaddr, end) local_flush_tlb_range(vma, vmaddr, end)
43 #define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page)
/linux-master/tools/testing/selftests/bpf/progs/
bpf_iter_vma_offset.c
18 struct vm_area_struct *vma = ctx->vma; local
22 if (task == NULL || vma == NULL)
32 if (vma->vm_start <= address && vma->vm_end > address) {
33 offset = address - vma->vm_start + (vma->vm_pgoff << page_shift);
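
Aside: the arithmetic on line 33 is the standard translation from a user virtual address to an offset within the backing file (vm_pgoff is the mapping's file offset, counted in pages). The same computation in plain C, with hypothetical names:

/* Translate a user virtual address inside a mapping into an offset
 * within the mapped file; page_shift is assumed to be PAGE_SHIFT. */
static unsigned long addr_to_file_offset(unsigned long address,
                                         unsigned long vm_start,
                                         unsigned long vm_pgoff,
                                         unsigned int page_shift)
{
        return address - vm_start + (vm_pgoff << page_shift);
}
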
iters_task_vma.c
21 struct vm_area_struct *vma; local
30 bpf_for_each(task_vma, vma, task, 0) {
34 vm_ranges[seen].vm_start = vma->vm_start;
35 vm_ranges[seen].vm_end = vma->vm_end;
find_vma_fail1.c
13 static long write_vma(struct task_struct *task, struct vm_area_struct *vma, argument
16 /* writing to vma, which is illegal */
17 vma->vm_start = 0xffffffffff600000;
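
Aside: this selftest exists because, as far as the BPF verifier is concerned, the vma pointer handed to a bpf_find_vma() callback is read-only; the store above must be rejected at load time. For contrast, a hypothetical callback that only reads should be accepted:

/* Hypothetical read-only counterpart: loads through the vma pointer
 * are fine, only stores (as in write_vma above) are rejected. */
static long read_vma(struct task_struct *task, struct vm_area_struct *vma,
                     void *data)
{
        *(unsigned long *)data = vma->vm_start; /* load: allowed */
        return 0;
}
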
/linux-master/mm/
mremap.c
72 static pud_t *alloc_new_pud(struct mm_struct *mm, struct vm_area_struct *vma, argument
86 static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma, argument
92 pud = alloc_new_pud(mm, vma, addr);
105 static void take_rmap_locks(struct vm_area_struct *vma) argument
107 if (vma->vm_file)
108 i_mmap_lock_write(vma->vm_file->f_mapping);
109 if (vma->anon_vma)
110 anon_vma_lock_write(vma->anon_vma);
113 static void drop_rmap_locks(struct vm_area_struct *vma) argument
115 if (vma
136 move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, unsigned long old_addr, unsigned long old_end, struct vm_area_struct *new_vma, pmd_t *new_pmd, unsigned long new_addr, bool need_rmap_locks) argument
236 move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr, unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd) argument
295 move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr, unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd) argument
304 move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr, unsigned long new_addr, pud_t *old_pud, pud_t *new_pud) argument
344 move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr, unsigned long new_addr, pud_t *old_pud, pud_t *new_pud) argument
353 move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr, unsigned long new_addr, pud_t *old_pud, pud_t *new_pud) argument
393 move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr, unsigned long new_addr, pud_t *old_pud, pud_t *new_pud) argument
451 move_pgt_entry(enum pgt_entry entry, struct vm_area_struct *vma, unsigned long old_addr, unsigned long new_addr, void *old_entry, void *new_entry, bool need_rmap_locks) argument
498 can_align_down(struct vm_area_struct *vma, unsigned long addr_to_align, unsigned long mask, bool for_stack) argument
544 move_page_tables(struct vm_area_struct *vma, unsigned long old_addr, struct vm_area_struct *new_vma, unsigned long new_addr, unsigned long len, bool need_rmap_locks, bool for_stack) argument
650 move_vma(struct vm_area_struct *vma, unsigned long old_addr, unsigned long old_len, unsigned long new_len, unsigned long new_addr, bool *locked, unsigned long flags, struct vm_userfaultfd_ctx *uf, struct list_head *uf_unmap) argument
817 struct vm_area_struct *vma; local
874 struct vm_area_struct *vma; local
954 vma_expandable(struct vm_area_struct *vma, unsigned long delta) argument
980 struct vm_area_struct *vma; local
[all...]
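
Aside: take_rmap_locks() appears in full above (lines 105-110) while its counterpart drop_rmap_locks() is cut off by the listing. The pair is symmetric, so the drop side presumably releases the same locks in reverse order; a sketch under that assumption, not a verbatim copy of the source:

/* Assumed mirror of take_rmap_locks(): unlock in reverse order. */
static void drop_rmap_locks_sketch(struct vm_area_struct *vma)
{
        if (vma->anon_vma)
                anon_vma_unlock_write(vma->anon_vma);
        if (vma->vm_file)
                i_mmap_unlock_write(vma->vm_file->f_mapping);
}
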
mprotect.c
42 bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr, argument
47 if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE)))
55 if (vma_soft_dirty_enabled(vma) && !pte_soft_dirty(pte))
59 if (userfaultfd_pte_wp(vma, pte))
62 if (!(vma->vm_flags & VM_SHARED)) {
69 page = vm_normal_page(vma, addr, pte);
84 struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr,
96 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
101 if (prot_numa && !(vma->vm_flags & VM_SHARED) &&
102 atomic_read(&vma
83 change_pte_range(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long end, pgprot_t newprot, unsigned long cp_flags) argument
298 pgtable_split_needed(struct vm_area_struct *vma, unsigned long cp_flags) argument
313 pgtable_populate_needed(struct vm_area_struct *vma, unsigned long cp_flags) argument
355 change_pmd_range(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud, unsigned long addr, unsigned long end, pgprot_t newprot, unsigned long cp_flags) argument
439 change_pud_range(struct mmu_gather *tlb, struct vm_area_struct *vma, p4d_t *p4d, unsigned long addr, unsigned long end, pgprot_t newprot, unsigned long cp_flags) argument
462 change_p4d_range(struct mmu_gather *tlb, struct vm_area_struct *vma, pgd_t *pgd, unsigned long addr, unsigned long end, pgprot_t newprot, unsigned long cp_flags) argument
485 change_protection_range(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long addr, unsigned long end, pgprot_t newprot, unsigned long cp_flags) argument
515 change_protection(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long start, unsigned long end, unsigned long cp_flags) argument
577 mprotect_fixup(struct vma_iterator *vmi, struct mmu_gather *tlb, struct vm_area_struct *vma, struct vm_area_struct **pprev, unsigned long start, unsigned long end, unsigned long newflags) argument
684 struct vm_area_struct *vma, *prev; local
[all...]
madvise.c
46 * Any behaviour which results in changes to the vma->vm_flags needs to
94 struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma) argument
96 mmap_assert_locked(vma->vm_mm);
98 return vma->anon_name;
102 static int replace_anon_vma_name(struct vm_area_struct *vma, argument
105 struct anon_vma_name *orig_name = anon_vma_name(vma);
108 vma->anon_name = NULL;
116 vma->anon_name = anon_vma_name_reuse(anon_name);
122 static int replace_anon_vma_name(struct vm_area_struct *vma, argument
132 * Update the vm_flags on region of a vma, splittin
137 madvise_update_vma(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end, unsigned long new_flags, struct anon_vma_name *anon_name) argument
174 struct vm_area_struct *vma = walk->private; local
220 shmem_swapin_range(struct vm_area_struct *vma, unsigned long start, unsigned long end, struct address_space *mapping) argument
261 madvise_willneed(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end) argument
309 can_do_file_pageout(struct vm_area_struct *vma) argument
332 struct vm_area_struct *vma = walk->vma; local
538 madvise_cold_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long addr, unsigned long end) argument
552 can_madv_lru_vma(struct vm_area_struct *vma) argument
557 madvise_cold(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start_addr, unsigned long end_addr) argument
576 madvise_pageout_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long addr, unsigned long end) argument
590 madvise_pageout(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start_addr, unsigned long end_addr) argument
625 struct vm_area_struct *vma = walk->vma; local
759 madvise_free_single_vma(struct vm_area_struct *vma, unsigned long start_addr, unsigned long end_addr) argument
813 madvise_dontneed_single_vma(struct vm_area_struct *vma, unsigned long start, unsigned long end) argument
820 madvise_dontneed_free_valid_vma(struct vm_area_struct *vma, unsigned long start, unsigned long *end, int behavior) argument
850 madvise_dontneed_free(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end, int behavior) argument
904 madvise_populate(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end, int behavior) argument
952 madvise_remove(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end) argument
1002 madvise_vma_behavior(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end, unsigned long behavior) argument
1207 madvise_walk_vmas(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long arg, int (*visit)(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end, unsigned long arg)) argument
1213 struct vm_area_struct *vma; local
1266 madvise_vma_anon_name(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end, unsigned long anon_name) argument
[all...]
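
Aside: every madvise_* handler above sits behind the userspace madvise(2) syscall. A minimal, self-contained usage example (standard Linux API, not taken from this listing):

#include <stdlib.h>
#include <sys/mman.h>

int main(void)
{
        size_t len = 16 * 4096;
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED)
                return 1;
        /* Serviced in-kernel by madvise_dontneed_free() and friends. */
        madvise(p, len, MADV_DONTNEED);
        munmap(p, len);
        return 0;
}
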
nommu.c
99 struct vm_area_struct *vma; local
101 vma = find_vma(current->mm, (unsigned long)objp);
102 if (vma)
103 return vma->vm_end - vma->vm_start;
115 * @vma: memory mapping
123 int follow_pfn(struct vm_area_struct *vma, unsigned long address, argument
126 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
170 struct vm_area_struct *vma; local
173 vma
351 vm_insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *page) argument
358 vm_map_pages(struct vm_area_struct *vma, struct page **pages, unsigned long num) argument
365 vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages, unsigned long num) argument
545 setup_vma_to_mm(struct vm_area_struct *vma, struct mm_struct *mm) argument
561 cleanup_vma_from_mm(struct vm_area_struct *vma) argument
580 delete_vma_from_mm(struct vm_area_struct *vma) argument
599 delete_vma(struct mm_struct *mm, struct vm_area_struct *vma) argument
639 struct vm_area_struct *vma; local
652 expand_stack_locked(struct vm_area_struct *vma, unsigned long addr) argument
671 struct vm_area_struct *vma; local
893 do_mmap_shared_file(struct vm_area_struct *vma) argument
914 do_mmap_private(struct vm_area_struct *vma, struct vm_region *region, unsigned long len, unsigned long capabilities) argument
1022 struct vm_area_struct *vma; local
1306 split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma, unsigned long addr, int new_below) argument
1384 vmi_shrink_vma(struct vma_iterator *vmi, struct vm_area_struct *vma, unsigned long from, unsigned long to) argument
1429 struct vm_area_struct *vma; local
1511 struct vm_area_struct *vma; local
1546 struct vm_area_struct *vma; local
1590 follow_page(struct vm_area_struct *vma, unsigned long address, unsigned int foll_flags) argument
1596 remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn, unsigned long size, pgprot_t prot) argument
1607 vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len) argument
1617 remap_vmalloc_range(struct vm_area_struct *vma, void *addr, unsigned long pgoff) argument
1650 struct vm_area_struct *vma; local
1734 struct vm_area_struct *vma; local
[all...]
io-mapping.c
9 * @vma: user vma to map to
16 int io_mapping_map_user(struct io_mapping *iomap, struct vm_area_struct *vma, argument
21 if (WARN_ON_ONCE((vma->vm_flags & expected_flags) != expected_flags))
25 return remap_pfn_range_notrack(vma, addr, pfn, size,
27 (pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK)));
mmap.c
80 struct vm_area_struct *vma, struct vm_area_struct *prev,
89 /* Update vma->vm_page_prot to reflect vma->vm_flags. */
90 void vma_set_page_prot(struct vm_area_struct *vma) argument
92 unsigned long vm_flags = vma->vm_flags;
95 vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
96 if (vma_wants_writenotify(vma, vm_page_prot)) {
100 /* remove_protection_ptes reads vma->vm_page_prot without mmap_lock */
101 WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
107 static void __remove_shared_vm_struct(struct vm_area_struct *vma, argument
122 unlink_file_vma(struct vm_area_struct *vma) argument
137 remove_vma(struct vm_area_struct *vma, bool unreachable) argument
285 struct vm_area_struct *vma; local
350 anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma) argument
359 anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma) argument
371 struct vm_area_struct *vma; local
384 __vma_link_file(struct vm_area_struct *vma, struct address_space *mapping) argument
395 vma_link_file(struct vm_area_struct *vma) argument
408 vma_link(struct mm_struct *mm, struct vm_area_struct *vma) argument
432 init_multi_vma_prep(struct vma_prepare *vp, struct vm_area_struct *vma, struct vm_area_struct *next, struct vm_area_struct *remove, struct vm_area_struct *remove2) argument
456 init_vma_prep(struct vma_prepare *vp, struct vm_area_struct *vma) argument
633 vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma, unsigned long start, unsigned long end, pgoff_t pgoff, struct vm_area_struct *next) argument
687 vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma, unsigned long start, unsigned long end, pgoff_t pgoff) argument
719 is_mergeable_vma(struct vm_area_struct *vma, struct file *file, unsigned long vm_flags, struct vm_userfaultfd_ctx vm_userfaultfd_ctx, struct anon_vma_name *anon_name, bool may_remove_vma) argument
745 is_mergeable_anon_vma(struct anon_vma *anon_vma1, struct anon_vma *anon_vma2, struct vm_area_struct *vma) argument
772 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags, struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff, struct vm_userfaultfd_ctx vm_userfaultfd_ctx, struct anon_vma_name *anon_name) argument
795 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags, struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff, struct vm_userfaultfd_ctx vm_userfaultfd_ctx, struct anon_vma_name *anon_name) argument
873 struct vm_area_struct *vma, *adjust, *remove, *remove2; local
1115 find_mergeable_anon_vma(struct vm_area_struct *vma) argument
1474 vma_is_shared_writable(struct vm_area_struct *vma) argument
1480 vma_fs_can_writeback(struct vm_area_struct *vma) argument
1494 vma_needs_dirty_tracking(struct vm_area_struct *vma) argument
1517 vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot) argument
1707 struct vm_area_struct *vma, *prev; local
1754 struct vm_area_struct *vma, *prev; local
1914 struct vm_area_struct *vma; local
1929 acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow) argument
1968 expand_upwards(struct vm_area_struct *vma, unsigned long address) argument
2067 expand_downwards(struct vm_area_struct *vma, unsigned long address) argument
2172 expand_stack_locked(struct vm_area_struct *vma, unsigned long address) argument
2179 struct vm_area_struct *vma, *prev; local
2194 expand_stack_locked(struct vm_area_struct *vma, unsigned long address) argument
2201 struct vm_area_struct *vma; local
2244 struct vm_area_struct *vma, *prev; local
2279 struct vm_area_struct *vma; local
2299 unmap_region(struct mm_struct *mm, struct ma_state *mas, struct vm_area_struct *vma, struct vm_area_struct *prev, struct vm_area_struct *next, unsigned long start, unsigned long end, unsigned long tree_end, bool mm_wr_locked) argument
2323 __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma, unsigned long addr, int new_below) argument
2405 split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma, unsigned long addr, int new_below) argument
2427 vma_modify(struct vma_iterator *vmi, struct vm_area_struct *prev, struct vm_area_struct *vma, unsigned long start, unsigned long end, unsigned long vm_flags, struct mempolicy *policy, struct vm_userfaultfd_ctx uffd_ctx, struct anon_vma_name *anon_name) argument
2466 vma_merge_new_vma(struct vma_iterator *vmi, struct vm_area_struct *prev, struct vm_area_struct *vma, unsigned long start, unsigned long end, pgoff_t pgoff) argument
2478 vma_merge_extend(struct vma_iterator *vmi, struct vm_area_struct *vma, unsigned long delta) argument
2505 do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma, struct mm_struct *mm, unsigned long start, unsigned long end, struct list_head *uf, bool unlock) argument
2676 struct vm_area_struct *vma; local
2720 struct vm_area_struct *vma = NULL; local
3001 struct vm_area_struct *vma; local
3091 do_vma_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma, unsigned long start, unsigned long end, struct list_head *uf, bool unlock) argument
3113 do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long flags) argument
3195 struct vm_area_struct *vma = NULL; local
3243 struct vm_area_struct *vma; local
3310 insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) argument
3356 struct vm_area_struct *vma = *vmap; local
3480 special_mapping_close(struct vm_area_struct *vma) argument
3484 special_mapping_name(struct vm_area_struct *vma) argument
3502 special_mapping_split(struct vm_area_struct *vma, unsigned long addr) argument
3530 struct vm_area_struct *vma = vmf->vma; local
3565 struct vm_area_struct *vma; local
3594 vma_is_special_mapping(const struct vm_area_struct *vma, const struct vm_special_mapping *sm) argument
3624 struct vm_area_struct *vma = __install_special_mapping( local
3714 struct vm_area_struct *vma; local
3810 struct vm_area_struct *vma; local
[all...]
/linux-master/arch/nios2/kernel/
sys_nios2.c
24 struct vm_area_struct *vma; local
45 vma = find_vma(mm, addr);
46 if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end) {
51 flush_cache_range(vma, addr, addr + len);
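
Aside: lines 45-46 show the canonical find_vma() containment check. find_vma() returns the first vma with vm_end > addr (or NULL), so the caller must still verify that addr lies at or above vm_start and that the whole range fits. The same pattern as a hypothetical helper:

/* Hypothetical helper: is [addr, addr + len) fully inside one vma?
 * Caller is assumed to hold the mmap lock, as find_vma() requires. */
static bool range_in_one_vma(struct mm_struct *mm, unsigned long addr,
                             unsigned long len)
{
        struct vm_area_struct *vma = find_vma(mm, addr);

        return vma && addr >= vma->vm_start && addr + len <= vma->vm_end;
}
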
/linux-master/arch/alpha/include/asm/
cacheflush.h
38 flush_icache_user_page(struct vm_area_struct *vma, struct page *page, argument
41 if (vma->vm_flags & VM_EXEC) {
42 struct mm_struct *mm = vma->vm_mm;
51 extern void flush_icache_user_page(struct vm_area_struct *vma,
60 static inline void flush_icache_pages(struct vm_area_struct *vma, argument
63 flush_icache_user_page(vma, page, 0, 0);
/linux-master/drivers/gpu/drm/i915/
i915_mm.h
17 int remap_io_mapping(struct vm_area_struct *vma,
22 int remap_io_mapping(struct vm_area_struct *vma, argument
31 int remap_io_sg(struct vm_area_struct *vma,
/linux-master/drivers/gpu/drm/xe/compat-i915-headers/
i915_vma.h
27 #define i915_vma_fence_id(vma) -1
29 static inline u32 i915_ggtt_offset(const struct i915_vma *vma) argument
31 return vma->node.start;
/linux-master/drivers/gpu/drm/nouveau/
nouveau_bo74c1.c
48 0x0308, upper_32_bits(mem->vma[0].addr),
49 0x030c, lower_32_bits(mem->vma[0].addr),
50 0x0310, upper_32_bits(mem->vma[1].addr),
51 0x0314, lower_32_bits(mem->vma[1].addr),
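
Aside: the push above splits a 64-bit GPU virtual address across two 32-bit registers. upper_32_bits()/lower_32_bits() are the stock kernel helpers; from memory they are defined roughly as below (the double shift avoids undefined behaviour when n is only 32 bits wide):

#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
#define lower_32_bits(n) ((u32)((n) & 0xffffffff))
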
/linux-master/arch/powerpc/include/asm/book3s/64/
tlbflush.h
47 static inline void flush_pmd_tlb_range(struct vm_area_struct *vma, argument
51 radix__flush_pmd_tlb_range(vma, start, end);
55 static inline void flush_pud_tlb_range(struct vm_area_struct *vma, argument
59 radix__flush_pud_tlb_range(vma, start, end);
63 static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma, argument
68 radix__flush_hugetlb_tlb_range(vma, start, end);
71 static inline void flush_tlb_range(struct vm_area_struct *vma, argument
75 radix__flush_tlb_range(vma, start, end);
91 static inline void local_flush_tlb_page(struct vm_area_struct *vma, argument
95 radix__local_flush_tlb_page(vma, vmadd
120 flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr) argument
132 flush_tlb_fix_spurious_fault(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) argument
[all...]
/linux-master/arch/hexagon/include/asm/
tlbflush.h
26 extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
27 extern void flush_tlb_range(struct vm_area_struct *vma,
cacheflush.h
18 * - flush_cache_range(vma, start, end) flushes a range of pages
21 * - flush_icache_pages(vma, pg, nr) flushes(invalidates) nr pages for icache
62 struct vm_area_struct *vma, unsigned long address,
68 #define update_mmu_cache(vma, addr, ptep) \
69 update_mmu_cache_range(NULL, vma, addr, ptep, 1)
71 void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
75 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
61 update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma, unsigned long address, pte_t *ptep, unsigned int nr) argument
/linux-master/arch/um/include/asm/
tlbflush.h
17 * - flush_tlb_page(vma, vmaddr) flushes one page
19 * - flush_tlb_range(vma, start, end) flushes a range of pages
24 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
26 extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long address);
/linux-master/arch/sparc/include/asm/
cacheflush_32.h
14 #define flush_cache_range(vma,start,end) \
15 sparc32_cachetlb_ops->cache_range(vma, start, end)
16 #define flush_cache_page(vma,addr,pfn) \
17 sparc32_cachetlb_ops->cache_page(vma, addr)
20 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
22 flush_cache_page(vma, vaddr, page_to_pfn(page));\
25 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
27 flush_cache_page(vma, vaddr, page_to_pfn(page));\
cacheflush_64.h
24 #define flush_cache_range(vma, start, end) \
25 flush_cache_mm((vma)->vm_mm)
26 #define flush_cache_page(vma, page, pfn) \
27 flush_cache_mm((vma)->vm_mm)
60 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
62 flush_cache_page(vma, vaddr, page_to_pfn(page)); \
64 flush_ptrace_access(vma, page, vaddr, src, len, 0); \
67 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
69 flush_cache_page(vma, vaddr, page_to_pfn(page)); \
71 flush_ptrace_access(vma, pag
[all...]
/linux-master/drivers/gpu/drm/xe/
xe_vm.h
104 * DOC: Provide accessors for vma members to facilitate easy change of
107 static inline u64 xe_vma_start(struct xe_vma *vma) argument
109 return vma->gpuva.va.addr;
112 static inline u64 xe_vma_size(struct xe_vma *vma) argument
114 return vma->gpuva.va.range;
117 static inline u64 xe_vma_end(struct xe_vma *vma) argument
119 return xe_vma_start(vma) + xe_vma_size(vma);
122 static inline u64 xe_vma_bo_offset(struct xe_vma *vma) argument
124 return vma
127 xe_vma_bo(struct xe_vma *vma) argument
133 xe_vma_vm(struct xe_vma *vma) argument
138 xe_vma_read_only(struct xe_vma *vma) argument
143 xe_vma_userptr(struct xe_vma *vma) argument
148 xe_vma_is_null(struct xe_vma *vma) argument
153 xe_vma_has_no_bo(struct xe_vma *vma) argument
158 xe_vma_is_userptr(struct xe_vma *vma) argument
169 to_userptr_vma(struct xe_vma *vma) argument
[all...]
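
Aside: with the accessors above, range arithmetic on GPU virtual addresses stays in one place. A hypothetical helper in the same style, built only from xe_vma_start()/xe_vma_end() as shown:

/* Hypothetical: do two mappings overlap in GPU virtual address space? */
static inline bool xe_vma_overlaps(struct xe_vma *a, struct xe_vma *b)
{
        return xe_vma_start(a) < xe_vma_end(b) &&
               xe_vma_start(b) < xe_vma_end(a);
}
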
/linux-master/arch/microblaze/include/asm/
tlbflush.h
27 static inline void local_flush_tlb_page(struct vm_area_struct *vma, argument
30 static inline void local_flush_tlb_range(struct vm_area_struct *vma, argument
36 #define update_mmu_cache_range(vmf, vma, addr, ptep, nr) do { } while (0)
37 #define update_mmu_cache(vma, addr, pte) \
38 update_mmu_cache_range(NULL, vma, addr, ptep, 1)
/linux-master/drivers/gpu/drm/i915/display/
intel_dsb_buffer.c
15 return i915_ggtt_offset(dsb_buf->vma);
39 struct i915_vma *vma; local
55 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
56 if (IS_ERR(vma)) {
61 buf = i915_gem_object_pin_map_unlocked(vma->obj, I915_MAP_WC);
63 i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP);
67 dsb_buf->vma = vma;
76 i915_vma_unpin_and_release(&dsb_buf->vma, I915_VMA_RELEASE_MAP);
81 i915_gem_object_flush_map(dsb_buf->vma
[all...]

Completed in 215 milliseconds
