Searched refs:vma (Results 126 - 150 of 1003) sorted by relevance

/linux-master/arch/parisc/include/asm/
cacheflush.h
62 void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
71 void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
73 void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
75 void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
77 void flush_cache_range(struct vm_area_struct *vma,
84 void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr);
/linux-master/mm/
internal.h
213 struct vm_area_struct *vma,
687 extern long populate_vma_page_range(struct vm_area_struct *vma,
710 folio_within_range(struct folio *folio, struct vm_area_struct *vma, argument
714 unsigned long vma_pglen = vma_pages(vma);
720 if (start < vma->vm_start)
721 start = vma->vm_start;
723 if (end > vma->vm_end)
724 end = vma->vm_end;
728 /* if folio start address is not in vma range */
729 if (!in_range(pgoff, vma
738 folio_within_vma(struct folio *folio, struct vm_area_struct *vma) argument
753 mlock_vma_folio(struct folio *folio, struct vm_area_struct *vma) argument
769 munlock_vma_folio(struct folio *folio, struct vm_area_struct *vma) argument
797 vma_pgoff_address(pgoff_t pgoff, unsigned long nr_pages, struct vm_area_struct *vma) argument
823 vma_address(struct page *page, struct vm_area_struct *vma) argument
835 struct vm_area_struct *vma = pvmw->vma; local
1157 gup_must_unshare(struct vm_area_struct *vma, unsigned int flags, struct page *page) argument
1215 vma_set_range(struct vm_area_struct *vma, unsigned long start, unsigned long end, pgoff_t pgoff) argument
1224 vma_soft_dirty_enabled(struct vm_area_struct *vma) argument
1251 vma_iter_prealloc(struct vma_iterator *vmi, struct vm_area_struct *vma) argument
1268 vma_iter_store(struct vma_iterator *vmi, struct vm_area_struct *vma) argument
1295 vma_iter_store_gfp(struct vma_iterator *vmi, struct vm_area_struct *vma, gfp_t gfp) argument
1314 struct vm_area_struct *vma; member in struct:vma_prepare
[all...]
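
The folio_within_range() hits above show the core of the containment test: the candidate range is first clamped to the VMA's [vm_start, vm_end) span before any page-offset check. A minimal userspace sketch of that clamp, using a hypothetical stand-in struct rather than the kernel's vm_area_struct:

    #include <stdbool.h>
    #include <stdio.h>

    struct vma_stub {                         /* hypothetical stand-in */
        unsigned long vm_start, vm_end;
    };

    /* Clamp [start, end) to the VMA before testing containment. */
    static bool range_in_vma_stub(const struct vma_stub *vma,
                                  unsigned long start, unsigned long end)
    {
        if (start < vma->vm_start)
            start = vma->vm_start;
        if (end > vma->vm_end)
            end = vma->vm_end;
        return start < end;                   /* anything left after clamping? */
    }

    int main(void)
    {
        struct vma_stub vma = { 0x1000, 0x5000 };

        printf("%d\n", range_in_vma_stub(&vma, 0x0800, 0x2000)); /* 1 */
        printf("%d\n", range_in_vma_stub(&vma, 0x6000, 0x7000)); /* 0 */
        return 0;
    }
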
/linux-master/arch/x86/mm/
pkeys.c
7 #include <linux/mm_types.h> /* mm_struct, vma, etc... */
62 static inline bool vma_is_pkey_exec_only(struct vm_area_struct *vma) argument
65 if ((vma->vm_flags & VM_ACCESS_FLAGS) != VM_EXEC)
67 if (vma_pkey(vma) != vma->vm_mm->context.execute_only_pkey)
76 int __arch_override_mprotect_pkey(struct vm_area_struct *vma, int prot, int pkey) argument
92 pkey = execute_only_pkey(vma->vm_mm);
95 } else if (vma_is_pkey_exec_only(vma)) {
110 return vma_pkey(vma);
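
The two checks above are the entire exec-only test: of the access bits, only VM_EXEC may be set, and the VMA's pkey must equal the mm's dedicated execute-only key. A sketch of the flags half of that test; the bit values are illustrative assumptions:

    #include <stdbool.h>
    #include <stdio.h>

    #define VM_READ  0x1UL                    /* illustrative values */
    #define VM_WRITE 0x2UL
    #define VM_EXEC  0x4UL
    #define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)

    /* Exec-only: any readable or writable bit disqualifies the mapping. */
    static bool is_exec_only(unsigned long vm_flags)
    {
        return (vm_flags & VM_ACCESS_FLAGS) == VM_EXEC;
    }

    int main(void)
    {
        printf("%d\n", is_exec_only(VM_EXEC));           /* 1 */
        printf("%d\n", is_exec_only(VM_READ | VM_EXEC)); /* 0 */
        return 0;
    }
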
/linux-master/include/trace/events/
mmap.h
72 TP_PROTO(struct maple_tree *mt, struct vm_area_struct *vma),
74 TP_ARGS(mt, vma),
78 __field(struct vm_area_struct *, vma)
85 __entry->vma = vma;
86 __entry->vm_start = vma->vm_start;
87 __entry->vm_end = vma->vm_end - 1;
91 __entry->mt, __entry->vma,
/linux-master/drivers/gpu/drm/i915/gem/
i915_gem_mman.h
21 int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma);
32 int i915_gem_fb_mmap(struct drm_i915_gem_object *obj, struct vm_area_struct *vma);
i915_gem_mman.c
30 __vma_matches(struct vm_area_struct *vma, struct file *filp, argument
33 if (vma->vm_file != filp)
36 return vma->vm_start == addr &&
37 (vma->vm_end - vma->vm_start) == PAGE_ALIGN(size);
107 struct vm_area_struct *vma; local
113 vma = find_vma(mm, addr);
114 if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
115 vma
306 struct i915_vma *vma; local
508 struct i915_vma *vma; local
868 vm_open(struct vm_area_struct *vma) argument
877 vm_close(struct vm_area_struct *vma) argument
937 i915_gem_object_mmap(struct drm_i915_gem_object *obj, struct i915_mmap_offset *mmo, struct vm_area_struct *vma) argument
1020 i915_gem_mmap(struct file *filp, struct vm_area_struct *vma) argument
1063 i915_gem_fb_mmap(struct drm_i915_gem_object *obj, struct vm_area_struct *vma) argument
[all...]
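
__vma_matches() above accepts a VMA only if it is backed by the same file, starts at the requested address, and spans exactly the page-aligned size. A self-contained sketch of that predicate; PAGE_SIZE and the stub struct are assumptions:

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    struct vma_stub {
        const void *vm_file;                  /* identity of the backing file */
        unsigned long vm_start, vm_end;
    };

    static bool vma_matches(const struct vma_stub *vma, const void *filp,
                            unsigned long addr, unsigned long size)
    {
        if (vma->vm_file != filp)
            return false;
        return vma->vm_start == addr &&
               (vma->vm_end - vma->vm_start) == PAGE_ALIGN(size);
    }

    int main(void)
    {
        int file;                             /* any unique address as identity */
        struct vma_stub v = { &file, 0x10000, 0x10000 + 2 * PAGE_SIZE };

        /* a size of PAGE_SIZE + 1 rounds up to the same two-page span */
        printf("%d\n", vma_matches(&v, &file, 0x10000, PAGE_SIZE + 1)); /* 1 */
        return 0;
    }
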
/linux-master/arch/arm/include/asm/
cacheflush.h
67 * - flags - vma->vm_flags field
170 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
225 vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) argument
227 struct mm_struct *mm = vma->vm_mm;
231 vma->vm_flags);
234 static inline void vivt_flush_cache_pages(struct vm_area_struct *vma, argument
237 struct mm_struct *mm = vma->vm_mm;
242 vma->vm_flags);
249 #define flush_cache_range(vma,start,end) \
250 vivt_flush_cache_range(vma,star
312 flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr) argument
[all...]
page.h
113 unsigned long vaddr, struct vm_area_struct *vma);
117 unsigned long vaddr, struct vm_area_struct *vma);
120 unsigned long vaddr, struct vm_area_struct *vma);
123 unsigned long vaddr, struct vm_area_struct *vma);
126 unsigned long vaddr, struct vm_area_struct *vma);
129 unsigned long vaddr, struct vm_area_struct *vma);
132 unsigned long vaddr, struct vm_area_struct *vma);
135 unsigned long vaddr, struct vm_area_struct *vma);
151 unsigned long vaddr, struct vm_area_struct *vma);
158 #define copy_user_highpage(to,from,vaddr,vma) \
[all...]
/linux-master/drivers/sbus/char/
flash.c
34 flash_mmap(struct file *file, struct vm_area_struct *vma) argument
44 if ((vma->vm_flags & VM_READ) &&
45 (vma->vm_flags & VM_WRITE)) {
49 if (vma->vm_flags & VM_READ) {
52 } else if (vma->vm_flags & VM_WRITE) {
62 if ((vma->vm_pgoff << PAGE_SHIFT) > size)
64 addr = vma->vm_pgoff + (addr >> PAGE_SHIFT);
66 if (vma->vm_end - (vma->vm_start + (vma
[all...]
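
flash_mmap() above treats VM_READ and VM_WRITE as mutually exclusive aperture selectors and rejects offsets past the end of the device. A sketch of that dispatch; the flag and shift values are illustrative:

    #include <stdio.h>

    #define VM_READ    0x1UL
    #define VM_WRITE   0x2UL
    #define PAGE_SHIFT 12

    static int pick_aperture(unsigned long vm_flags, unsigned long vm_pgoff,
                             unsigned long size)
    {
        if ((vm_flags & VM_READ) && (vm_flags & VM_WRITE))
            return -1;                        /* both at once: rejected */
        if ((vm_pgoff << PAGE_SHIFT) > size)
            return -1;                        /* offset past end of device */
        if (vm_flags & VM_READ)
            return 0;                         /* map the read aperture */
        if (vm_flags & VM_WRITE)
            return 1;                         /* map the write aperture */
        return -1;
    }

    int main(void)
    {
        printf("%d\n", pick_aperture(VM_READ, 0, 1UL << 20));            /* 0 */
        printf("%d\n", pick_aperture(VM_READ | VM_WRITE, 0, 1UL << 20)); /* -1 */
        return 0;
    }
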
/linux-master/fs/
userfaultfd.c
93 * meaningful when userfaultfd_wp()==true on the vma and when it's
96 bool userfaultfd_wp_unpopulated(struct vm_area_struct *vma) argument
98 struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx;
106 static void userfaultfd_set_vm_flags(struct vm_area_struct *vma, argument
109 const bool uffd_wp_changed = (vma->vm_flags ^ flags) & VM_UFFD_WP;
111 vm_flags_reset(vma, flags);
115 * recalculate vma->vm_page_prot whenever userfaultfd-wp changes.
117 if ((vma->vm_flags & VM_SHARED) && uffd_wp_changed)
118 vma_set_page_prot(vma);
248 struct vm_area_struct *vma local
379 struct vm_area_struct *vma = vmf->vma; local
617 struct vm_area_struct *vma; local
654 dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs) argument
730 mremap_userfaultfd_prep(struct vm_area_struct *vma, struct vm_userfaultfd_ctx *vm_ctx) argument
779 userfaultfd_remove(struct vm_area_struct *vma, unsigned long start, unsigned long end) argument
820 userfaultfd_unmap_prep(struct vm_area_struct *vma, unsigned long start, unsigned long end, struct list_head *unmaps) argument
869 struct vm_area_struct *vma, *prev; local
1288 struct vm_area_struct *vma, *prev, *cur; local
1511 struct vm_area_struct *vma, *prev, *cur; local
1958 userfaultfd_wp_async(struct vm_area_struct *vma) argument
[all...]
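
The uffd_wp_changed computation above is a common flag-diff idiom: XORing the old and new flag words isolates exactly the bits that changed, then a mask picks out VM_UFFD_WP; for shared mappings a flip of that bit forces vm_page_prot to be recomputed. A sketch with illustrative bit values:

    #include <stdbool.h>
    #include <stdio.h>

    #define VM_SHARED  0x08UL                 /* illustrative values */
    #define VM_UFFD_WP 0x10UL

    static bool needs_page_prot_update(unsigned long old_flags,
                                       unsigned long new_flags)
    {
        bool uffd_wp_changed = (old_flags ^ new_flags) & VM_UFFD_WP;

        /* shared mappings recompute vm_page_prot when uffd-wp toggles */
        return (old_flags & VM_SHARED) && uffd_wp_changed;
    }

    int main(void)
    {
        printf("%d\n", needs_page_prot_update(VM_SHARED,
                                              VM_SHARED | VM_UFFD_WP)); /* 1 */
        printf("%d\n", needs_page_prot_update(VM_SHARED, VM_SHARED));   /* 0 */
        return 0;
    }
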
/linux-master/arch/microblaze/mm/
fault.c
89 struct vm_area_struct *vma; local
150 vma = find_vma(mm, address);
151 if (unlikely(!vma))
154 if (vma->vm_start <= address)
157 if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
172 if (unlikely(address + 0x100000 < vma->vm_end)) {
195 vma = expand_stack(mm, address);
196 if (!vma)
204 if (unlikely(!(vma->vm_flags & VM_WRITE)))
212 if (unlikely(!(vma
[all...]
/linux-master/arch/openrisc/mm/
fault.c
51 struct vm_area_struct *vma; local
111 vma = find_vma(mm, address);
113 if (!vma)
116 if (vma->vm_start <= address)
119 if (!(vma->vm_flags & VM_GROWSDOWN))
132 vma = expand_stack(mm, address);
133 if (!vma)
147 if (!(vma->vm_flags & VM_WRITE))
152 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
157 if ((vector == 0x400) && !(vma
[all...]
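
The microblaze and openrisc fault handlers above share one shape: find_vma(), accept a direct hit, otherwise require VM_GROWSDOWN and try expand_stack(), then verify access rights. This userspace sketch stubs out the lookup and the expansion to show only that control flow; nothing in it is the kernel's implementation:

    #include <stdio.h>
    #include <stddef.h>

    #define VM_READ      0x1UL
    #define VM_WRITE     0x2UL
    #define VM_EXEC      0x4UL
    #define VM_GROWSDOWN 0x100UL

    struct vma_stub {
        unsigned long vm_start, vm_end, vm_flags;
    };

    /* pretend address space: one downward-growable stack VMA */
    static struct vma_stub stack_vma = { 0x7000, 0x8000,
                                         VM_READ | VM_WRITE | VM_GROWSDOWN };

    static struct vma_stub *find_vma_stub(unsigned long addr)
    {
        return addr < stack_vma.vm_end ? &stack_vma : NULL;
    }

    static struct vma_stub *expand_stack_stub(unsigned long addr)
    {
        stack_vma.vm_start = addr;            /* grow the VMA down to the fault */
        return &stack_vma;
    }

    static int handle_fault(unsigned long address, int is_write)
    {
        struct vma_stub *vma = find_vma_stub(address);

        if (!vma)
            return -1;                        /* no mapping above the address */
        if (vma->vm_start > address) {
            if (!(vma->vm_flags & VM_GROWSDOWN))
                return -1;                    /* a gap, not a stack */
            vma = expand_stack_stub(address);
            if (!vma)
                return -1;
        }
        if (is_write && !(vma->vm_flags & VM_WRITE))
            return -1;                        /* write to a read-only mapping */
        if (!is_write && !(vma->vm_flags & (VM_READ | VM_EXEC)))
            return -1;
        return 0;                             /* fault can be handled */
    }

    int main(void)
    {
        printf("%d\n", handle_fault(0x6800, 1)); /* 0: stack grows down */
        return 0;
    }
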
/linux-master/drivers/gpu/drm/i915/gem/selftests/
i915_gem_client_blt.c
94 struct i915_vma *vma; member in struct:blit_buffer
129 if (GRAPHICS_VER(buf->vma->vm->i915) < 9)
133 if (buf->tiling == CLIENT_TILING_X && !fastblit_supports_x_tiling(buf->vma->vm->i915))
193 *cs++ = lower_32_bits(i915_vma_offset(dst->vma));
194 *cs++ = upper_32_bits(i915_vma_offset(dst->vma));
197 *cs++ = lower_32_bits(i915_vma_offset(src->vma));
198 *cs++ = upper_32_bits(i915_vma_offset(src->vma));
239 *cs++ = lower_32_bits(i915_vma_offset(dst->vma));
241 *cs++ = upper_32_bits(i915_vma_offset(dst->vma));
244 *cs++ = lower_32_bits(i915_vma_offset(src->vma));
273 struct i915_vma *vma; local
315 struct i915_vma *vma; local
451 pin_buffer(struct i915_vma *vma, u64 addr) argument
[all...]
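
The blitter emission above splits each 64-bit GPU address into two dwords before writing it into the command stream. lower_32_bits()/upper_32_bits() below follow the kernel helpers of the same name; the command buffer is just an array here:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }
    static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

    int main(void)
    {
        uint64_t vma_offset = 0x0000000123456000ULL; /* hypothetical GPU VA */
        uint32_t cmds[2], *cs = cmds;

        *cs++ = lower_32_bits(vma_offset);    /* low dword first */
        *cs++ = upper_32_bits(vma_offset);
        printf("lo=%#x hi=%#x\n", cmds[0], cmds[1]);
        return 0;
    }
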
/linux-master/arch/m68k/include/asm/
cacheflush_mm.h
207 static inline void flush_cache_range(struct vm_area_struct *vma, argument
211 if (vma->vm_mm == current->mm)
215 static inline void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn) argument
217 if (vma->vm_mm == current->mm)
263 #define flush_icache_pages(vma, page, nr) \
266 extern void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
272 static inline void copy_to_user_page(struct vm_area_struct *vma, argument
276 flush_cache_page(vma, vaddr, page_to_pfn(page));
278 flush_icache_user_page(vma, page, vaddr, len);
280 static inline void copy_from_user_page(struct vm_area_struct *vma, argument
[all...]
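
m68k's copy_to_user_page() above is an ordering recipe: flush the dcache alias for the target page, perform the copy, then flush the icache so instruction fetch sees the new bytes. A stubbed sketch of that sequence; the flushes are print statements, only the order is the point:

    #include <stdio.h>
    #include <string.h>

    static void flush_cache_page_stub(void)  { puts("dcache flush"); }
    static void flush_icache_page_stub(void) { puts("icache flush"); }

    static void copy_to_user_page_stub(char *dst, const char *src, size_t len)
    {
        flush_cache_page_stub();              /* before: drop stale dcache lines */
        memcpy(dst, src, len);                /* the actual copy */
        flush_icache_page_stub();             /* after: keep I/D caches coherent */
    }

    int main(void)
    {
        char page[16] = { 0 };

        copy_to_user_page_stub(page, "int3", 4);
        return 0;
    }
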
/linux-master/arch/mips/kernel/
vdso.c
94 struct vm_area_struct *vma; local
151 vma = _install_special_mapping(mm, base, vvar_size,
154 if (IS_ERR(vma)) {
155 ret = PTR_ERR(vma);
164 ret = io_remap_pfn_range(vma, base, gic_pfn, gic_size,
165 pgprot_noncached(vma->vm_page_prot));
171 ret = remap_pfn_range(vma, data_addr,
173 PAGE_SIZE, vma->vm_page_prot);
178 vma = _install_special_mapping(mm, vdso_addr, image->size,
182 if (IS_ERR(vma)) {
[all...]
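
The vdso setup above uses the kernel's error-pointer idiom: _install_special_mapping() returns either a valid pointer or a negative errno encoded at the very top of the address space, decoded with IS_ERR()/PTR_ERR(). A sketch that mirrors include/linux/err.h in spirit, not verbatim:

    #include <stdio.h>

    #define MAX_ERRNO 4095
    #define ENOMEM    12

    static void *ERR_PTR(long error) { return (void *)error; }
    static long PTR_ERR(const void *ptr) { return (long)ptr; }
    static int IS_ERR(const void *ptr)
    {
        /* errnos live in the last 4095 values of the address space */
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    static void *install_mapping_stub(int fail)
    {
        return fail ? ERR_PTR(-ENOMEM) : (void *)0x1000;
    }

    int main(void)
    {
        void *vma = install_mapping_stub(1);

        if (IS_ERR(vma))
            printf("ret = %ld\n", PTR_ERR(vma)); /* ret = -12 */
        return 0;
    }
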
/linux-master/drivers/gpu/drm/i915/
i915_debugfs.c
135 static const char *stringify_vma_type(const struct i915_vma *vma) argument
137 if (i915_vma_is_ggtt(vma))
140 if (i915_vma_is_dpt(vma))
194 struct i915_vma *vma; local
211 spin_lock(&obj->vma.lock);
212 list_for_each_entry(vma, &obj->vma.list, obj_link) {
213 if (!drm_mm_node_allocated(&vma->node))
216 spin_unlock(&obj->vma.lock);
218 if (i915_vma_is_pinned(vma))
[all...]
/linux-master/arch/riscv/mm/
tlbflush.c
13 __asm__ __volatile__ ("sfence.vma x0, %0"
25 __asm__ __volatile__ ("sfence.vma %0, %1"
162 void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) argument
164 __flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
168 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, argument
173 if (!is_vm_hugetlb_page(vma)) {
176 stride_size = huge_page_size(hstate_vma(vma));
197 __flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma
208 flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) argument
[all...]
/linux-master/drivers/gpu/drm/xe/
xe_vm.c
49 * @uvma: The userptr vma
51 * Check if the userptr vma has been invalidated since last successful
54 * vma userptr will remain valid after a lockless check, so typically
57 * Return: 0 if userptr vma is valid, -EAGAIN otherwise; repin recommended.
69 struct xe_vma *vma = &uvma->vma; local
70 struct xe_vm *vm = xe_vma_vm(vma);
72 const unsigned long num_pages = xe_vma_size(vma) >> PAGE_SHIFT;
77 bool read_only = xe_vma_read_only(vma);
80 xe_assert(xe, xe_vma_is_userptr(vma));
673 struct xe_vma *vma = &uvma->vma; local
813 struct xe_vma *vma, *next; local
838 xe_vma_free(struct xe_vma *vma) argument
856 struct xe_vma *vma; local
951 xe_vma_destroy_late(struct xe_vma *vma) argument
992 struct xe_vma *vma = local
1001 struct xe_vma *vma = container_of(cb, struct xe_vma, destroy_cb); local
1007 xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence) argument
1050 xe_vm_lock_vma(struct drm_exec *exec, struct xe_vma *vma) argument
1065 xe_vma_destroy_unlocked(struct xe_vma *vma) argument
1100 xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma) argument
1115 xe_vm_remove_vma(struct xe_vm *vm, struct xe_vma *vma) argument
1229 xelp_pte_encode_vma(u64 pte, struct xe_vma *vma, u16 pat_index, u32 pt_level) argument
1496 struct xe_vma *vma, *next_vma; local
1655 xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q, struct xe_sync_entry *syncs, u32 num_syncs, bool first_op, bool last_op) argument
1741 xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q, struct xe_sync_entry *syncs, u32 num_syncs, bool first_op, bool last_op) argument
1830 __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q, struct xe_sync_entry *syncs, u32 num_syncs, bool immediate, bool first_op, bool last_op) argument
1871 xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q, struct xe_bo *bo, struct xe_sync_entry *syncs, u32 num_syncs, bool immediate, bool first_op, bool last_op) argument
1891 xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q, struct xe_sync_entry *syncs, u32 num_syncs, bool first_op, bool last_op) argument
2053 xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q, u32 region, struct xe_sync_entry *syncs, u32 num_syncs, bool first_op, bool last_op) argument
2090 prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma, bool post_commit) argument
2106 struct xe_vma *vma; local
2231 struct xe_vma *vma; local
2279 xe_vma_max_pte_size(struct xe_vma *vma) argument
2293 xe_vma_set_pte_size(struct xe_vma *vma, u64 size) argument
2389 struct xe_vma *vma; local
2533 op_execute(struct drm_exec *exec, struct xe_vm *vm, struct xe_vma *vma, struct xe_vma_op *op) argument
2621 __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma, struct xe_vma_op *op) argument
2673 struct xe_vma *vma; local
2735 struct xe_vma *vma = gpuva_to_vma(op->base.unmap.va); local
2748 struct xe_vma *vma = gpuva_to_vma(op->base.remap.unmap->va); local
3250 xe_vm_invalidate_vma(struct xe_vma *vma) argument
3321 struct xe_vma *vma = gpuva_to_vma(gpuva); local
3386 struct xe_vma *vma = gpuva_to_vma(gpuva); local
[all...]
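
The xe_vm.c comment above describes an advisory, lockless validity check on a userptr VMA, with -EAGAIN meaning "repin and retry". A generic sketch of that validate-or-retry pattern built on a notifier sequence counter; the struct and field names are assumptions, not xe's actual layout:

    #include <stdio.h>

    #define EAGAIN 11

    struct userptr_stub {
        unsigned long notifier_seq;           /* bumped on invalidation */
        unsigned long pinned_seq;             /* snapshot taken at last pin */
    };

    static int userptr_check_valid(const struct userptr_stub *u)
    {
        /* lockless: a stale answer is fine, the caller just repins */
        return u->notifier_seq == u->pinned_seq ? 0 : -EAGAIN;
    }

    int main(void)
    {
        struct userptr_stub u = { .notifier_seq = 3, .pinned_seq = 3 };

        printf("%d\n", userptr_check_valid(&u)); /* 0: still valid */
        u.notifier_seq++;                        /* invalidated since pin */
        printf("%d\n", userptr_check_valid(&u)); /* -11: repin recommended */
        return 0;
    }
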
/linux-master/drivers/gpu/drm/i915/gt/
selftest_ring_submission.c
12 struct i915_vma *vma; local
20 vma = i915_vma_instance(obj, engine->gt->vm, NULL);
21 if (IS_ERR(vma)) {
23 return vma;
26 err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_HIGH);
32 err = i915_vma_sync(vma);
53 *cs++ = i915_vma_offset(vma) + 4000;
61 vma->private = intel_context_create(engine); /* dummy residuals */
62 if (IS_ERR(vma->private)) {
63 vma
[all...]
/linux-master/drivers/gpu/drm/virtio/
virtgpu_vram.c
33 struct vm_area_struct *vma)
39 unsigned long vm_size = vma->vm_end - vma->vm_start;
48 vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
49 vm_flags_set(vma, VM_MIXEDMAP | VM_DONTEXPAND);
50 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
51 vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
52 vma
32 virtio_gpu_vram_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) argument
[all...]
/linux-master/drivers/accel/habanalabs/common/
memory_mgr.c
195 * @vma: the vma object for which mmap was closed.
199 static void hl_mmap_mem_buf_vm_close(struct vm_area_struct *vma) argument
202 (struct hl_mmap_mem_buf *)vma->vm_private_data;
205 new_mmap_size = buf->real_mapped_size - (vma->vm_end - vma->vm_start);
214 vma->vm_private_data = NULL;
225 * @vma: the vma object for which mmap was closed.
228 * Map the buffer specified by the vma
230 hl_mem_mgr_mmap(struct hl_mem_mgr *mmg, struct vm_area_struct *vma, void *args) argument
[all...]
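
hl_mmap_mem_buf_vm_close() above shrinks its bookkeeping by the span of the VMA being closed, which copes with mappings that were split and unmapped piecewise. The arithmetic, with hypothetical values:

    #include <stdio.h>

    int main(void)
    {
        unsigned long real_mapped_size = 4 * 4096;        /* what mmap() mapped */
        unsigned long vm_start = 0x2000, vm_end = 0x4000; /* part being closed */
        unsigned long new_mmap_size =
            real_mapped_size - (vm_end - vm_start);

        /* only when this reaches zero is the whole buffer unmapped */
        printf("still mapped: %lu bytes\n", new_mmap_size); /* 8192 */
        return 0;
    }
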
/linux-master/arch/arm/mm/
fault-armv.c
36 static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address, argument
52 flush_cache_page(vma, address, pfn);
57 set_pte_at(vma->vm_mm, address, ptep, entry);
58 flush_tlb_page(vma, address);
88 static int adjust_pte(struct vm_area_struct *vma, unsigned long address, argument
99 pgd = pgd_offset(vma->vm_mm, address);
120 pte = pte_offset_map_nolock(vma->vm_mm, pmd, address, &ptl);
126 ret = do_adjust_pte(vma, address, pfn, pte);
135 make_coherent(struct address_space *mapping, struct vm_area_struct *vma, argument
138 struct mm_struct *mm = vma
183 update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, unsigned int nr) argument
[all...]
/linux-master/arch/s390/kernel/
vdso.c
55 struct vm_area_struct *vma; local
58 for_each_vma(vmi, vma) {
59 if (!vma_is_special_mapping(vma, &vvar_mapping))
61 zap_vma_pages(vma);
70 struct vm_area_struct *vma, struct vm_fault *vmf)
72 struct page *timens_page = find_timens_vvar_page(vma);
85 err = vmf_insert_pfn(vma, addr, pfn);
108 return vmf_insert_pfn(vma, vmf->address, pfn);
112 struct vm_area_struct *vma)
114 current->mm->context.vdso_base = vma
69 vvar_fault(const struct vm_special_mapping *sm, struct vm_area_struct *vma, struct vm_fault *vmf) argument
111 vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *vma) argument
145 struct vm_area_struct *vma; local
[all...]
/linux-master/drivers/dax/
device.c
17 static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma, argument
27 if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE) {
35 if (vma->vm_start & mask || vma->vm_end & mask) {
37 "%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n",
38 current->comm, func, vma->vm_start, vma->vm_end,
43 if (!vma_is_dax(vma)) {
45 "%s: %s: fail, vma is not DAX capable\n",
80 struct file *filp = vmf->vma
262 dev_dax_may_split(struct vm_area_struct *vma, unsigned long addr) argument
272 dev_dax_pagesize(struct vm_area_struct *vma) argument
287 dax_mmap(struct file *filp, struct vm_area_struct *vma) argument
[all...]
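
check_vma() above enforces device-dax alignment by masking both VMA ends with align - 1 and requires the mapping to be shareable. A sketch of the alignment half with an assumed 2 MiB alignment:

    #include <stdbool.h>
    #include <stdio.h>

    static bool vma_aligned(unsigned long vm_start, unsigned long vm_end,
                            unsigned long align)
    {
        unsigned long mask = align - 1;       /* align must be a power of two */

        return !(vm_start & mask) && !(vm_end & mask);
    }

    int main(void)
    {
        unsigned long align = 2UL << 20;      /* 2 MiB, e.g. PMD-sized dax */

        printf("%d\n", vma_aligned(0x40000000, 0x40200000, align)); /* 1 */
        printf("%d\n", vma_aligned(0x40001000, 0x40200000, align)); /* 0 */
        return 0;
    }
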
/linux-master/arch/parisc/mm/
fault.c
124 /* This is the treewalk to find a vma which is the highest that has
244 struct vm_area_struct *vma)
258 vma ? ',':'\n');
260 if (vma)
262 vma->vm_start, vma->vm_end);
270 struct vm_area_struct *vma, *prev_vma; local
295 vma = find_vma_prev(mm, address, &prev_vma);
296 if (!vma || address < vma
242 show_signal_msg(struct pt_regs *regs, unsigned long code, unsigned long address, struct task_struct *tsk, struct vm_area_struct *vma) argument
457 struct vm_area_struct *vma; local
[all...]
