Searched refs:vma (Results 101 - 125 of 1013) sorted by relevance


/linux-master/arch/x86/entry/vdso/
vma.c
63 struct vm_area_struct *vma, struct vm_fault *vmf)
65 const struct vdso_image *image = vma->vm_mm->context.vdso_image;
114 struct vm_area_struct *vma; local
118 for_each_vma(vmi, vma) {
119 if (vma_is_special_mapping(vma, &vvar_mapping))
120 zap_vma_pages(vma);
129 struct vm_area_struct *vma, struct vm_fault *vmf)
131 const struct vdso_image *image = vma->vm_mm->context.vdso_image;
152 struct page *timens_page = find_timens_vvar_page(vma);
174 err = vmf_insert_pfn(vma, add
62 vdso_fault(const struct vm_special_mapping *sm, struct vm_area_struct *vma, struct vm_fault *vmf) argument
128 vvar_fault(const struct vm_special_mapping *sm, struct vm_area_struct *vma, struct vm_fault *vmf) argument
226 struct vm_area_struct *vma; local
280 struct vm_area_struct *vma; local
[all...]
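
The 114-120 hit is the stock way to invalidate a special mapping across a whole process: walk every VMA with a VMA iterator and zap the pages of any VMA backed by that mapping, so the next access refaults through its fault handler. A minimal sketch of the pattern, using only the helpers named in the hits (the function name is invented):

#include <linux/mm.h>

/* Drop all PTEs of a special mapping; subsequent accesses refault
 * through the mapping's ->fault handler. */
static void zap_special_mapping(struct mm_struct *mm,
                                const struct vm_special_mapping *sm)
{
        VMA_ITERATOR(vmi, mm, 0);
        struct vm_area_struct *vma;

        mmap_read_lock(mm);
        for_each_vma(vmi, vma) {
                if (vma_is_special_mapping(vma, sm))
                        zap_vma_pages(vma);
        }
        mmap_read_unlock(mm);
}
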
/linux-master/arch/powerpc/mm/nohash/
e500_hugetlbpage.c
119 book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea, pte_t pte) argument
131 mm = vma->vm_mm;
133 psize = vma_mmu_pagesize(vma);
181 void __update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) argument
183 if (is_vm_hugetlb_page(vma))
184 book3e_hugetlb_preload(vma, address, *ptep);
187 void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr) argument
189 struct hstate *hstate = hstate_file(vma->vm_file);
192 __flush_tlb_page(vma->vm_mm, vmaddr, tsize, 0);
/linux-master/drivers/gpu/drm/xe/display/
xe_fb_pin.c
82 struct i915_vma *vma)
155 vma->dpt = dpt;
156 vma->node = dpt->ggtt_node;
186 struct i915_vma *vma)
207 vma->node = bo->ggtt_node;
211 ret = xe_ggtt_insert_special_node_locked(ggtt, &vma->node, size,
220 xe_ggtt_set_pte(ggtt, vma->node.start + x, pte);
229 ret = xe_ggtt_insert_special_node_locked(ggtt, &vma->node, size,
234 ggtt_ofs = vma->node.start;
257 struct i915_vma *vma local
80 __xe_pin_fb_vma_dpt(struct intel_framebuffer *fb, const struct i915_gtt_view *view, struct i915_vma *vma) argument
184 __xe_pin_fb_vma_ggtt(struct intel_framebuffer *fb, const struct i915_gtt_view *view, struct i915_vma *vma) argument
318 __xe_unpin_fb_vma(struct i915_vma *vma) argument
347 intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags) argument
356 struct i915_vma *vma; local
[all...]
/linux-master/drivers/infiniband/core/
ib_core_uverbs.c
12 * rdma_umap_priv_init() - Initialize the private data of a vma
15 * @vma: The vm area struct that needs private data
17 * this vma
31 struct vm_area_struct *vma,
34 struct ib_uverbs_file *ufile = vma->vm_file->private_data;
36 priv->vma = vma;
41 vma->vm_private_data = priv;
54 * @vma: the vma relate
30 rdma_umap_priv_init(struct rdma_umap_priv *priv, struct vm_area_struct *vma, struct rdma_user_mmap_entry *entry) argument
67 rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma, unsigned long pfn, unsigned long size, pgprot_t prot, struct rdma_user_mmap_entry *entry) argument
161 rdma_user_mmap_entry_get(struct ib_ucontext *ucontext, struct vm_area_struct *vma) argument
[all...]
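
The rdma_umap_priv_init() hits show the usual mmap bookkeeping pattern: per-mapping driver state is stashed in vma->vm_private_data so the VM callbacks (open, close, fault) can recover it later. A minimal sketch under that reading; the struct and function names here are invented, and the real code additionally links the priv into a per-ufile list under a lock:

#include <linux/mm.h>

/* Hypothetical per-mapping driver state, mirroring rdma_umap_priv. */
struct my_umap_priv {
        struct vm_area_struct *vma;
};

static void my_umap_priv_init(struct my_umap_priv *priv,
                              struct vm_area_struct *vma)
{
        priv->vma = vma;
        /* From here on, VM ops reach the state via vm_private_data. */
        vma->vm_private_data = priv;
}
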
/linux-master/arch/x86/include/asm/
pkeys.h
33 extern int __arch_override_mprotect_pkey(struct vm_area_struct *vma,
35 static inline int arch_override_mprotect_pkey(struct vm_area_struct *vma, argument
41 return __arch_override_mprotect_pkey(vma, prot, pkey);
118 static inline int vma_pkey(struct vm_area_struct *vma) argument
123 return (vma->vm_flags & vma_pkey_mask) >> VM_PKEY_SHIFT;
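
vma_pkey() in the 118-123 hit is a plain mask-and-shift over vm_flags. A sketch of the extraction, assuming x86's VM_PKEY_BIT0..3 and VM_PKEY_SHIFT definitions (the function name is invented):

#include <linux/mm.h>

static inline int sketch_vma_pkey(struct vm_area_struct *vma)
{
        /* The four VM_PKEY_BIT* flags hold the key; VM_PKEY_SHIFT is
         * the position of its lowest bit. */
        unsigned long vma_pkey_mask = VM_PKEY_BIT0 | VM_PKEY_BIT1 |
                                      VM_PKEY_BIT2 | VM_PKEY_BIT3;

        return (vma->vm_flags & vma_pkey_mask) >> VM_PKEY_SHIFT;
}
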
/linux-master/arch/powerpc/include/asm/
hugetlb.h
46 static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma, argument
51 pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
52 flush_hugetlb_page(vma, addr);
57 int huge_ptep_set_access_flags(struct vm_area_struct *vma,
65 static inline void flush_hugetlb_page(struct vm_area_struct *vma, argument
/linux-master/mm/
mincore.c
73 struct vm_area_struct *vma, unsigned char *vec)
78 if (vma->vm_file) {
81 pgoff = linear_page_index(vma, addr);
83 vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);
96 walk->vma, walk->private);
104 struct vm_area_struct *vma = walk->vma; local
109 ptl = pmd_trans_huge_lock(pmd, vma);
127 vma, vec);
158 static inline bool can_do_mincore(struct vm_area_struct *vma) argument
72 __mincore_unmapped_range(unsigned long addr, unsigned long end, struct vm_area_struct *vma, unsigned char *vec) argument
189 struct vm_area_struct *vma; local
[all...]
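
The __mincore_unmapped_range() hits show how mincore() answers for ranges with no page tables: for a file-backed VMA each result byte comes from a page-cache lookup, while anonymous holes are trivially absent. A sketch of that logic; mincore_page() is file-local to mm/mincore.c, so this is an illustration rather than a drop-in, and the function name is invented:

#include <linux/mm.h>
#include <linux/pagemap.h>

static void sketch_mincore_unmapped(unsigned long addr, unsigned long end,
                                    struct vm_area_struct *vma,
                                    unsigned char *vec)
{
        unsigned long nr = (end - addr) >> PAGE_SHIFT;
        unsigned long i;

        if (vma->vm_file) {
                /* Translate the user address to a file page index. */
                pgoff_t pgoff = linear_page_index(vma, addr);

                for (i = 0; i < nr; i++, pgoff++)
                        vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);
        } else {
                /* No file and no page tables: nothing can be resident. */
                for (i = 0; i < nr; i++)
                        vec[i] = 0;
        }
}
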
hugetlb.c
96 static void hugetlb_vma_lock_free(struct vm_area_struct *vma);
97 static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma);
98 static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma);
99 static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
101 static struct resv_map *vma_resv_map(struct vm_area_struct *vma);
258 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma) argument
260 return subpool_inode(file_inode(vma->vm_file));
266 void hugetlb_vma_lock_read(struct vm_area_struct *vma) argument
268 if (__vma_shareable_lock(vma)) {
269 struct hugetlb_vma_lock *vma_lock = vma
279 hugetlb_vma_unlock_read(struct vm_area_struct *vma) argument
292 hugetlb_vma_lock_write(struct vm_area_struct *vma) argument
305 hugetlb_vma_unlock_write(struct vm_area_struct *vma) argument
318 hugetlb_vma_trylock_write(struct vm_area_struct *vma) argument
334 hugetlb_vma_assert_locked(struct vm_area_struct *vma) argument
357 struct vm_area_struct *vma = vma_lock->vma; local
370 __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma) argument
384 hugetlb_vma_lock_free(struct vm_area_struct *vma) argument
400 hugetlb_vma_lock_alloc(struct vm_area_struct *vma) argument
990 vma_hugecache_offset(struct hstate *h, struct vm_area_struct *vma, unsigned long address) argument
1006 vma_kernel_pagesize(struct vm_area_struct *vma) argument
1020 vma_mmu_pagesize(struct vm_area_struct *vma) argument
1053 get_vma_private_data(struct vm_area_struct *vma) argument
1058 set_vma_private_data(struct vm_area_struct *vma, unsigned long value) argument
1148 vma_resv_map(struct vm_area_struct *vma) argument
1163 set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map) argument
1171 set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags) argument
1179 is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag) argument
1186 __vma_private_lock(struct vm_area_struct *vma) argument
1193 hugetlb_dup_vma_private(struct vm_area_struct *vma) argument
1224 clear_vma_resv_huge_pages(struct vm_area_struct *vma) argument
1249 vma_has_reserves(struct vm_area_struct *vma, long chg) argument
1390 dequeue_hugetlb_folio_vma(struct hstate *h, struct vm_area_struct *vma, unsigned long address, int avoid_reserve, long chg) argument
2571 alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h, struct vm_area_struct *vma, unsigned long addr) argument
2797 __vma_reservation_common(struct hstate *h, struct vm_area_struct *vma, unsigned long addr, enum vma_resv_mode mode) argument
2877 vma_needs_reservation(struct hstate *h, struct vm_area_struct *vma, unsigned long addr) argument
2883 vma_commit_reservation(struct hstate *h, struct vm_area_struct *vma, unsigned long addr) argument
2889 vma_end_reservation(struct hstate *h, struct vm_area_struct *vma, unsigned long addr) argument
2895 vma_add_reservation(struct hstate *h, struct vm_area_struct *vma, unsigned long addr) argument
2901 vma_del_reservation(struct hstate *h, struct vm_area_struct *vma, unsigned long addr) argument
2927 restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma, unsigned long address, struct folio *folio) argument
3127 alloc_hugetlb_folio(struct vm_area_struct *vma, unsigned long addr, int avoid_reserve) argument
5167 hugetlb_vm_op_open(struct vm_area_struct *vma) argument
5205 hugetlb_vm_op_close(struct vm_area_struct *vma) argument
5236 hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr) argument
5262 hugetlb_vm_op_pagesize(struct vm_area_struct *vma) argument
5294 make_huge_pte(struct vm_area_struct *vma, struct page *page, int writable) argument
5313 set_huge_ptep_writable(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) argument
5350 hugetlb_install_folio(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr, struct folio *new_folio, pte_t old, unsigned long sz) argument
5553 move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr, unsigned long new_addr, pte_t *src_pte, pte_t *dst_pte, unsigned long sz) argument
5580 move_hugetlb_page_tables(struct vm_area_struct *vma, struct vm_area_struct *new_vma, unsigned long old_addr, unsigned long new_addr, unsigned long len) argument
5644 __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long start, unsigned long end, struct page *ref_page, zap_flags_t zap_flags) argument
5812 __hugetlb_zap_begin(struct vm_area_struct *vma, unsigned long *start, unsigned long *end) argument
5824 __hugetlb_zap_end(struct vm_area_struct *vma, struct zap_details *details) argument
5851 unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, unsigned long end, struct page *ref_page, zap_flags_t zap_flags) argument
5876 unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma, struct page *page, unsigned long address) argument
5935 struct vm_area_struct *vma = vmf->vma; local
6134 hugetlbfs_pagecache_present(struct hstate *h, struct vm_area_struct *vma, unsigned long address) argument
6214 struct vm_area_struct *vma = vmf->vma; local
6446 hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, unsigned int flags) argument
6646 alloc_hugetlb_folio_vma(struct hstate *h, struct vm_area_struct *vma, unsigned long address) argument
6893 hugetlb_change_protection(struct vm_area_struct *vma, unsigned long address, unsigned long end, pgprot_t newprot, unsigned long cp_flags) argument
7041 hugetlb_reserve_pages(struct inode *inode, long from, long to, struct vm_area_struct *vma, vm_flags_t vm_flags) argument
7243 page_table_shareable(struct vm_area_struct *svma, struct vm_area_struct *vma, unsigned long addr, pgoff_t idx) argument
7271 want_pmd_share(struct vm_area_struct *vma, unsigned long addr) argument
7297 adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, unsigned long *start, unsigned long *end) argument
7328 huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, pud_t *pud) argument
7385 huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) argument
7406 huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, pud_t *pud) argument
7412 huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) argument
7418 adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, unsigned long *start, unsigned long *end) argument
7423 want_pmd_share(struct vm_area_struct *vma, unsigned long addr) argument
7430 huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long sz) argument
7635 hugetlb_unshare_pmds(struct vm_area_struct *vma, unsigned long start, unsigned long end) argument
7685 hugetlb_unshare_all_pmds(struct vm_area_struct *vma) argument
[all...]
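
All of the hugetlb_vma_lock_* hits follow one guard pattern: the extra per-VMA lock only exists where PMD sharing is possible, so every lock and unlock first checks __vma_shareable_lock() and is a no-op otherwise. A sketch of the read-lock side (the function name is invented; the field names follow struct hugetlb_vma_lock as shown in the include/linux/hugetlb.h hits below; recent kernels also handle a private-mapping lock in an else branch):

void sketch_hugetlb_vma_lock_read(struct vm_area_struct *vma)
{
        if (__vma_shareable_lock(vma)) {
                struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

                down_read(&vma_lock->rw_sema);
        }
}
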
/linux-master/arch/csky/abiv1/
cacheflush.c
44 void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma, argument
50 flush_tlb_page(vma, addr);
63 if (vma->vm_flags & VM_EXEC)
68 void flush_cache_range(struct vm_area_struct *vma, unsigned long start, argument
73 if (vma->vm_flags & VM_EXEC)
/linux-master/arch/csky/abiv1/inc/abi/
cacheflush.h
16 #define flush_cache_page(vma, page, pfn) cache_wbinv_all()
33 static inline void flush_anon_page(struct vm_area_struct *vma, argument
41 * if (current_mm != vma->mm) cache_wbinv_range(start, end) will be broken.
44 extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
53 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
58 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
/linux-master/arch/csky/abiv2/inc/abi/
cacheflush.h
16 #define flush_cache_range(vma, start, end) do { } while (0)
17 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
47 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
50 if (vma->vm_flags & VM_EXEC) { \
58 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
/linux-master/arch/nios2/include/asm/
cacheflush.h
26 extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
28 extern void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
36 void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
44 extern void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
47 extern void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
/linux-master/drivers/gpu/drm/i915/gt/
intel_ggtt_fencing.c
201 struct i915_vma *vma)
210 if (vma) {
211 GEM_BUG_ON(!i915_gem_object_get_stride(vma->obj) ||
212 !i915_gem_object_get_tiling(vma->obj));
214 if (!i915_vma_is_map_and_fenceable(vma))
219 ret = i915_vma_sync(vma);
224 GEM_BUG_ON(vma->fence_size > i915_vma_size(vma));
225 fence->start = i915_ggtt_offset(vma);
226 fence->size = vma
200 fence_update(struct i915_fence_reg *fence, struct i915_vma *vma) argument
291 i915_vma_revoke_fence(struct i915_vma *vma) argument
361 __i915_vma_pin_fence(struct i915_vma *vma) argument
423 i915_vma_pin_fence(struct i915_vma *vma) argument
[all...]
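
Hits 210-226 are the preconditions fence_update() enforces before programming a fence register. Assembled in one place for readability (the wrapper name is invented; the helpers are i915-internal):

static int sketch_fence_preconditions(struct i915_vma *vma)
{
        int ret;

        /* A fenced VMA must carry tiling metadata... */
        GEM_BUG_ON(!i915_gem_object_get_stride(vma->obj) ||
                   !i915_gem_object_get_tiling(vma->obj));

        /* ...sit in the mappable, fenceable part of the GGTT... */
        if (!i915_vma_is_map_and_fenceable(vma))
                return -EINVAL;

        /* ...and be idle before the register is rewritten. */
        ret = i915_vma_sync(vma);
        if (ret)
                return ret;

        GEM_BUG_ON(vma->fence_size > i915_vma_size(vma));
        return 0;
}
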
intel_renderstate.c
58 d = i915_gem_object_pin_map(so->vma->obj, I915_MAP_WB);
66 u64 r = s + i915_vma_offset(so->vma);
89 so->batch_offset = i915_ggtt_offset(so->vma);
135 __i915_gem_object_flush_map(so->vma->obj, 0, i * sizeof(u32));
136 __i915_gem_object_release_map(so->vma->obj);
160 so->vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
161 if (IS_ERR(so->vma)) {
162 err = PTR_ERR(so->vma);
177 err = i915_gem_object_lock(so->vma->obj, &so->ww);
181 err = i915_vma_pin_ww(so->vma,
[all...]
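
The renderstate hits trace the standard CPU-write sequence for a GEM object: pin a write-back CPU map, fill it, flush exactly the bytes written, then release the map. A reduced sketch (the function name is invented; relocation of batch offsets is omitted):

static int sketch_fill_batch(struct i915_vma *vma, const u32 *data,
                             unsigned int count)
{
        unsigned int i;
        u32 *d;

        d = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
        if (IS_ERR(d))
                return PTR_ERR(d);

        for (i = 0; i < count; i++)
                d[i] = data[i];

        /* Flush only what was written, then drop the CPU mapping. */
        __i915_gem_object_flush_map(vma->obj, 0, count * sizeof(u32));
        __i915_gem_object_release_map(vma->obj);
        return 0;
}
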
/linux-master/drivers/gpu/drm/i915/display/
intel_plane_initial.c
21 struct i915_vma **vma)
42 *vma = plane_state->ggtt_vma;
144 struct i915_vma *vma; local
229 vma = i915_vma_instance(obj, &to_gt(i915)->ggtt->vm, NULL);
230 if (IS_ERR(vma))
237 if (i915_vma_pin(vma, 0, 0, pinctl)) {
251 !i915_vma_is_map_and_fenceable(vma))
259 i915_ggtt_offset(vma), plane_config->base);
261 return vma;
278 struct i915_vma *vma; local
18 intel_reuse_initial_plane_obj(struct intel_crtc *this, const struct intel_initial_plane_config plane_configs[], struct drm_framebuffer **fb, struct i915_vma **vma) argument
330 struct i915_vma *vma; local
[all...]
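
The intel_plane_initial hits show the two-step GGTT pinning used when taking over a BIOS framebuffer: look up (or create) the GGTT VMA for the object, then pin it. A sketch with error handling reduced to the essentials (the function name is invented):

static struct i915_vma *sketch_pin_initial_fb(struct drm_i915_private *i915,
                                              struct drm_i915_gem_object *obj,
                                              u64 pinctl)
{
        struct i915_vma *vma;
        int ret;

        vma = i915_vma_instance(obj, &to_gt(i915)->ggtt->vm, NULL);
        if (IS_ERR(vma))
                return vma;

        ret = i915_vma_pin(vma, 0, 0, pinctl);
        if (ret)
                return ERR_PTR(ret);

        return vma;
}
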
/linux-master/drivers/gpu/drm/i915/gem/
i915_gem_tiling.c
161 static bool i915_vma_fence_prepare(struct i915_vma *vma, argument
164 struct drm_i915_private *i915 = vma->vm->i915;
167 if (!i915_vma_is_map_and_fenceable(vma))
170 size = i915_gem_fence_size(i915, vma->size, tiling_mode, stride);
171 if (i915_vma_size(vma) < size)
174 alignment = i915_gem_fence_alignment(i915, vma->size, tiling_mode, stride);
175 if (!IS_ALIGNED(i915_ggtt_offset(vma), alignment))
188 struct i915_vma *vma, *vn; local
197 spin_lock(&obj->vma.lock);
198 for_each_ggtt_vma(vma, ob
235 struct i915_vma *vma; local
[all...]
/linux-master/fs/proc/
task_mmu.c
132 struct vm_area_struct *vma = vma_next(&priv->iter); local
134 if (vma) {
135 *ppos = vma->vm_start;
138 vma = get_gate_vma(priv->mm);
141 return vma;
263 show_map_vma(struct seq_file *m, struct vm_area_struct *vma) argument
266 struct mm_struct *mm = vma->vm_mm;
267 struct file *file = vma->vm_file;
268 vm_flags_t flags = vma->vm_flags;
276 const struct inode *inode = file_user_inode(vma
505 struct vm_area_struct *vma = walk->vma; local
531 struct vm_area_struct *vma = walk->vma; local
578 struct vm_area_struct *vma = walk->vma; local
619 struct vm_area_struct *vma = walk->vma; local
643 show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma) argument
732 struct vm_area_struct *vma = walk->vma; local
776 smap_gather_stats(struct vm_area_struct *vma, struct mem_size_stats *mss, unsigned long start) argument
859 struct vm_area_struct *vma = v; local
889 struct vm_area_struct *vma; local
1089 pte_is_pinned(struct vm_area_struct *vma, unsigned long addr, pte_t pte) argument
1105 clear_soft_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *pte) argument
1131 clear_soft_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *pte) argument
1138 clear_soft_dirty_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmdp) argument
1161 clear_soft_dirty_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmdp) argument
1171 struct vm_area_struct *vma = walk->vma; local
1231 struct vm_area_struct *vma = walk->vma; local
1261 struct vm_area_struct *vma; local
1381 struct vm_area_struct *vma = find_vma(walk->mm, addr); local
1413 pte_to_pagemap_entry(struct pagemapread *pm, struct vm_area_struct *vma, unsigned long addr, pte_t pte) argument
1470 struct vm_area_struct *vma = walk->vma; local
1577 struct vm_area_struct *vma = walk->vma; local
1787 pagemap_page_category(struct pagemap_scan_private *p, struct vm_area_struct *vma, unsigned long addr, pte_t pte) argument
1830 make_uffd_wp_pte(struct vm_area_struct *vma, unsigned long addr, pte_t *pte, pte_t ptent) argument
1849 pagemap_thp_category(struct pagemap_scan_private *p, struct vm_area_struct *vma, unsigned long addr, pmd_t pmd) argument
1892 make_uffd_wp_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmdp) argument
1939 make_uffd_wp_huge_pte(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, pte_t ptent) argument
2005 struct vm_area_struct *vma = walk->vma; local
2112 struct vm_area_struct *vma = walk->vma; local
2162 struct vm_area_struct *vma = walk->vma; local
2265 struct vm_area_struct *vma = walk->vma; local
2324 struct vm_area_struct *vma = walk->vma; local
2590 can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma, unsigned long addr) argument
2614 can_gather_numa_stats_pmd(pmd_t pmd, struct vm_area_struct *vma, unsigned long addr) argument
2643 struct vm_area_struct *vma = walk->vma; local
2717 struct vm_area_struct *vma = v; local
[all...]
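
The first task_mmu.c hit (132-141) is the seq_file cursor for /proc/<pid>/maps: pull the next VMA from the iterator, publish its start address as the file position, and fall back to the gate VMA once the tree is exhausted. A sketch of that step; the function name is invented and the sentinel value is an assumption:

static struct vm_area_struct *sketch_next_map(struct proc_maps_private *priv,
                                              loff_t *ppos)
{
        struct vm_area_struct *vma = vma_next(&priv->iter);

        if (vma) {
                *ppos = vma->vm_start;
        } else {
                *ppos = -2;     /* out-of-range marker for the gate VMA */
                vma = get_gate_vma(priv->mm);
        }
        return vma;
}
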
/linux-master/arch/alpha/kernel/
pci-sysfs.c
19 struct vm_area_struct *vma,
29 vma->vm_pgoff += base >> PAGE_SHIFT;
31 return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
32 vma->vm_end - vma->vm_start,
33 vma->vm_page_prot);
37 struct vm_area_struct *vma, int sparse)
42 nr = vma_pages(vma);
18 hose_mmap_page_range(struct pci_controller *hose, struct vm_area_struct *vma, enum pci_mmap_state mmap_type, int sparse) argument
36 __pci_mmap_fits(struct pci_dev *pdev, int num, struct vm_area_struct *vma, int sparse) argument
66 pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr, struct vm_area_struct *vma, int sparse) argument
95 pci_mmap_resource_sparse(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, struct vm_area_struct *vma) argument
102 pci_mmap_resource_dense(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, struct vm_area_struct *vma) argument
256 __legacy_mmap_fits(struct pci_controller *hose, struct vm_area_struct *vma, unsigned long res_size, int sparse) argument
286 pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma, enum pci_mmap_state mmap_type) argument
[all...]
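
hose_mmap_page_range() in the 18-33 hits is a compact example of PCI BAR mmap on Alpha: fold the resource base into vm_pgoff, then map the whole VMA in one io_remap_pfn_range() call. The core, with the sparse/dense base selection reduced to a parameter (the function name is invented):

static int sketch_hose_mmap(struct vm_area_struct *vma, unsigned long base)
{
        /* Rebase the file offset onto the controller's MMIO window. */
        vma->vm_pgoff += base >> PAGE_SHIFT;

        return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                                  vma->vm_end - vma->vm_start,
                                  vma->vm_page_prot);
}
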
/linux-master/drivers/infiniband/sw/rdmavt/
mmap.c
42 static void rvt_vma_open(struct vm_area_struct *vma) argument
44 struct rvt_mmap_info *ip = vma->vm_private_data;
49 static void rvt_vma_close(struct vm_area_struct *vma) argument
51 struct rvt_mmap_info *ip = vma->vm_private_data;
64 * @vma: the VMA to be initialized
68 int rvt_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) argument
71 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
72 unsigned long size = vma->vm_end - vma->vm_start;
94 ret = remap_vmalloc_range(vma, i
[all...]
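
rvt_mmap() in the hits keys a lookup off vma->vm_pgoff, size-checks the VMA, and then backs it with an existing vmalloc buffer. A sketch with the lookup elided and the buffer passed in directly (both the function name and that simplification are assumptions):

static int sketch_rvt_mmap(struct vm_area_struct *vma, void *obj_buf,
                           unsigned long obj_size)
{
        unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
        unsigned long size = vma->vm_end - vma->vm_start;

        /* In the real driver 'offset' identifies the object to map. */
        if (offset != 0 || size > obj_size)
                return -EINVAL;

        /* Back the whole VMA with the vmalloc'ed buffer. */
        return remap_vmalloc_range(vma, obj_buf, 0);
}
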
/linux-master/drivers/gpu/drm/xe/
xe_pt.h
38 __xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue *q,
43 __xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue *q,
46 bool xe_pt_zap_ptes(struct xe_tile *tile, struct xe_vma *vma);
/linux-master/include/linux/
hugetlb.h
113 struct vm_area_struct *vma; member in struct:hugetlb_vma_lock
128 void hugetlb_dup_vma_private(struct vm_area_struct *vma);
129 void clear_vma_resv_huge_pages(struct vm_area_struct *vma);
130 int move_hugetlb_page_tables(struct vm_area_struct *vma,
136 struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
143 struct vm_area_struct *vma,
150 vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
161 struct vm_area_struct *vma,
175 pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
178 struct vm_area_struct *vma,
257 hugetlb_zap_begin(struct vm_area_struct *vma, unsigned long *start, unsigned long *end) argument
264 hugetlb_zap_end(struct vm_area_struct *vma, struct zap_details *details) argument
287 hugetlb_dup_vma_private(struct vm_area_struct *vma) argument
291 clear_vma_resv_huge_pages(struct vm_area_struct *vma) argument
306 huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) argument
313 adjust_range_if_pmd_sharing_possible( struct vm_area_struct *vma, unsigned long *start, unsigned long *end) argument
319 hugetlb_zap_begin( struct vm_area_struct *vma, unsigned long *start, unsigned long *end) argument
325 hugetlb_zap_end( struct vm_area_struct *vma, struct zap_details *details) argument
340 move_hugetlb_page_tables(struct vm_area_struct *vma, struct vm_area_struct *new_vma, unsigned long old_addr, unsigned long new_addr, unsigned long len) argument
369 hugetlb_vma_lock_read(struct vm_area_struct *vma) argument
373 hugetlb_vma_unlock_read(struct vm_area_struct *vma) argument
377 hugetlb_vma_lock_write(struct vm_area_struct *vma) argument
381 hugetlb_vma_unlock_write(struct vm_area_struct *vma) argument
385 hugetlb_vma_trylock_write(struct vm_area_struct *vma) argument
390 hugetlb_vma_assert_locked(struct vm_area_struct *vma) argument
451 hugetlb_change_protection( struct vm_area_struct *vma, unsigned long address, unsigned long end, pgprot_t newprot, unsigned long cp_flags) argument
459 __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long start, unsigned long end, struct page *ref_page, zap_flags_t zap_flags) argument
467 hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, unsigned int flags) argument
475 hugetlb_unshare_all_pmds(struct vm_area_struct *vma) argument
770 hstate_vma(struct vm_area_struct *vma) argument
1003 huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) argument
1012 huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, pte_t old_pte, pte_t pte) argument
1271 __vma_shareable_lock(struct vm_area_struct *vma) argument
1283 hugetlb_walk(struct vm_area_struct *vma, unsigned long addr, unsigned long sz) argument
[all...]
/linux-master/drivers/gpu/drm/i915/
i915_gem.c
97 struct i915_vma *vma; local
104 list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
105 if (i915_vma_is_pinned(vma))
106 pinned += vma->node.size;
123 struct i915_vma *vma; local
128 if (list_empty(&obj->vma.list))
141 spin_lock(&obj->vma.lock);
142 while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
145 list_move_tail(&vma
309 struct i915_vma *vma; local
364 i915_gem_gtt_cleanup(struct drm_i915_gem_object *obj, struct drm_mm_node *node, struct i915_vma *vma) argument
390 struct i915_vma *vma; local
553 struct i915_vma *vma; local
892 discard_ggtt_vma(struct i915_vma *vma) argument
912 struct i915_vma *vma; local
[all...]
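
Hits 97-106 are the aperture accounting loop: every VMA bound into the GGTT sits on the vm's bound_list, and the pinned total is the sum of the node sizes of the pinned ones. The loop as a standalone helper (the name is invented; the real code holds the vm mutex across the walk):

static u64 sketch_count_pinned(struct i915_ggtt *ggtt)
{
        struct i915_vma *vma;
        u64 pinned = 0;

        list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
                if (i915_vma_is_pinned(vma))
                        pinned += vma->node.size;

        return pinned;
}
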
/linux-master/arch/parisc/include/asm/
cacheflush.h
61 void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
70 void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
72 void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
74 void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
76 void flush_cache_range(struct vm_area_struct *vma,
80 void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr);
/linux-master/fs/
userfaultfd.c
94 * meaningful when userfaultfd_wp()==true on the vma and when it's
97 bool userfaultfd_wp_unpopulated(struct vm_area_struct *vma) argument
99 struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx;
107 static void userfaultfd_set_vm_flags(struct vm_area_struct *vma, argument
110 const bool uffd_wp_changed = (vma->vm_flags ^ flags) & VM_UFFD_WP;
112 vm_flags_reset(vma, flags);
116 * recalculate vma->vm_page_prot whenever userfaultfd-wp changes.
118 if ((vma->vm_flags & VM_SHARED) && uffd_wp_changed)
119 vma_set_page_prot(vma);
249 struct vm_area_struct *vma local
380 struct vm_area_struct *vma = vmf->vma; local
618 struct vm_area_struct *vma; local
655 dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs) argument
734 mremap_userfaultfd_prep(struct vm_area_struct *vma, struct vm_userfaultfd_ctx *vm_ctx) argument
783 userfaultfd_remove(struct vm_area_struct *vma, unsigned long start, unsigned long end) argument
824 userfaultfd_unmap_prep(struct vm_area_struct *vma, unsigned long start, unsigned long end, struct list_head *unmaps) argument
873 struct vm_area_struct *vma, *prev; local
1296 struct vm_area_struct *vma, *prev, *cur; local
1519 struct vm_area_struct *vma, *prev, *cur; local
1966 userfaultfd_wp_async(struct vm_area_struct *vma) argument
[all...]
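
The userfaultfd_set_vm_flags() hits (107-119) make one subtle point: toggling VM_UFFD_WP on a shared VMA must recompute vm_page_prot, otherwise write-protect faults would never be taken. The function assembled from the hits:

static void sketch_uffd_set_vm_flags(struct vm_area_struct *vma,
                                     unsigned long flags)
{
        const bool uffd_wp_changed = (vma->vm_flags ^ flags) & VM_UFFD_WP;

        vm_flags_reset(vma, flags);
        /* Shared mappings encode uffd-wp in the page protection, so a
         * VM_UFFD_WP change forces a vm_page_prot recalculation. */
        if ((vma->vm_flags & VM_SHARED) && uffd_wp_changed)
                vma_set_page_prot(vma);
}
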
/linux-master/arch/x86/mm/
pkeys.c
7 #include <linux/mm_types.h> /* mm_struct, vma, etc... */
62 static inline bool vma_is_pkey_exec_only(struct vm_area_struct *vma) argument
65 if ((vma->vm_flags & VM_ACCESS_FLAGS) != VM_EXEC)
67 if (vma_pkey(vma) != vma->vm_mm->context.execute_only_pkey)
76 int __arch_override_mprotect_pkey(struct vm_area_struct *vma, int prot, int pkey) argument
92 pkey = execute_only_pkey(vma->vm_mm);
95 } else if (vma_is_pkey_exec_only(vma)) {
110 return vma_pkey(vma);
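
vma_is_pkey_exec_only() (hits 62-67) defines "execute-only" precisely: the VMA's only access flag is VM_EXEC and it carries the mm's dedicated execute-only protection key. Assembled from the hits:

static inline bool sketch_vma_is_pkey_exec_only(struct vm_area_struct *vma)
{
        /* Readable or writable mappings are not execute-only. */
        if ((vma->vm_flags & VM_ACCESS_FLAGS) != VM_EXEC)
                return false;
        /* It must also use the pkey reserved for execute-only. */
        if (vma_pkey(vma) != vma->vm_mm->context.execute_only_pkey)
                return false;
        return true;
}
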

