Searched refs:vma (Results 376 - 400 of 1013) sorted by relevance


/linux-master/arch/powerpc/mm/book3s64/subpage_prot.c
141 struct vm_area_struct *vma = walk->vma; local
142 split_huge_pmd(vma, pmd, addr);
154 struct vm_area_struct *vma; local
158 * We don't try too hard, we just mark all the vma in that range
161 for_each_vma_range(vmi, vma, addr + len) {
162 vm_flags_set(vma, VM_NOHUGEPAGE);
163 walk_page_vma(vma, &subpage_walk_ops, NULL);
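
The subpage_prot hit above is a compact example of the VMA iterator API: every VMA intersecting a range gets a flag set and a page-table walk. A minimal sketch of that shape, assuming a recent kernel with for_each_vma_range(), vm_flags_set() and walk_page_vma(); the function and ops names here are illustrative, not from the file:

#include <linux/mm.h>
#include <linux/pagewalk.h>

/* Split any huge PMD so the walk only ever sees base pages, mirroring
 * the split_huge_pmd() call in the hit above. */
static int sketch_pmd_entry(pmd_t *pmd, unsigned long addr,
			    unsigned long end, struct mm_walk *walk)
{
	split_huge_pmd(walk->vma, pmd, addr);
	return 0;
}

static const struct mm_walk_ops sketch_walk_ops = {
	.pmd_entry = sketch_pmd_entry,
};

/* Mark every VMA intersecting [addr, addr + len) and walk its page
 * tables.  vm_flags_set() requires mmap_lock held for write. */
static void mark_range_nohuge(struct mm_struct *mm,
			      unsigned long addr, unsigned long len)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, addr);

	for_each_vma_range(vmi, vma, addr + len) {
		vm_flags_set(vma, VM_NOHUGEPAGE);
		walk_page_vma(vma, &sketch_walk_ops, NULL);
	}
}
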
/linux-master/arch/xtensa/mm/fault.c
90 struct vm_area_struct * vma; local
134 vma = lock_mm_and_find_vma(mm, address, regs);
135 if (!vma)
145 if (!(vma->vm_flags & VM_WRITE))
149 if (!(vma->vm_flags & VM_EXEC))
152 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
159 fault = handle_mm_fault(vma, address, flags, regs);
/linux-master/arch/loongarch/mm/fault.c
141 struct vm_area_struct *vma = NULL; local
178 vma = lock_mm_and_find_vma(mm, address, regs);
179 if (unlikely(!vma))
202 if (!(vma->vm_flags & VM_WRITE))
205 if (!(vma->vm_flags & VM_EXEC) && address == exception_era(regs))
207 if (!(vma->vm_flags & (VM_READ | VM_WRITE)) && address != exception_era(regs))
216 fault = handle_mm_fault(vma, address, flags, regs);
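
The xtensa and LoongArch fault handlers above share the same skeleton: lock_mm_and_find_vma() takes mmap_lock and locates the VMA, the access type is checked against vma->vm_flags, then handle_mm_fault() does the real work. A condensed sketch of that skeleton (signal delivery and per-arch details elided; is_write and the labels are illustrative):

#include <linux/mm.h>
#include <linux/sched.h>

static void page_fault_sketch(struct pt_regs *regs, unsigned long address,
			      bool is_write)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned int flags = FAULT_FLAG_DEFAULT;
	vm_fault_t fault;

	if (is_write)
		flags |= FAULT_FLAG_WRITE;

	vma = lock_mm_and_find_vma(mm, address, regs);
	if (unlikely(!vma))
		return;	/* no VMA: lock_mm_and_find_vma() already unlocked */

	if (is_write && !(vma->vm_flags & VM_WRITE))
		goto bad_area;
	if (!is_write && !(vma->vm_flags & (VM_READ | VM_EXEC)))
		goto bad_area;

	fault = handle_mm_fault(vma, address, flags, regs);
	if (fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED))
		return;	/* mmap_lock already dropped by the fault path */
	mmap_read_unlock(mm);
	return;

bad_area:
	mmap_read_unlock(mm);
	/* deliver SIGSEGV here */
}

Real handlers loop again with FAULT_FLAG_TRIED after VM_FAULT_RETRY; that retry loop is dropped here for brevity.
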
/linux-master/drivers/gpu/drm/xen/xen_drm_front_gem.c
61 struct vm_area_struct *vma)
66 vma->vm_ops = gem_obj->funcs->vm_ops;
73 vm_flags_mod(vma, VM_MIXEDMAP | VM_DONTEXPAND, VM_PFNMAP);
74 vma->vm_pgoff = 0;
83 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
93 ret = vm_map_pages(vma, xen_obj->pages, xen_obj->num_pages);
95 DRM_ERROR("Failed to map pages into vma: %d\n", ret);
60 xen_drm_front_gem_object_mmap(struct drm_gem_object *gem_obj, struct vm_area_struct *vma) argument
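
The Xen GEM hit shows the vm_map_pages() style of driver mmap: trade VM_PFNMAP for VM_MIXEDMAP, pin the size with VM_DONTEXPAND, zero vm_pgoff (vm_map_pages() treats it as an index into the page array), recompute the protection, and map the pages. A minimal sketch, with pages/num_pages assumed to come from the object being mapped:

#include <linux/mm.h>

static int gem_mmap_pages_sketch(struct vm_area_struct *vma,
				 struct page **pages, unsigned long num_pages)
{
	/* struct-page-backed mapping: VM_MIXEDMAP in, VM_PFNMAP out */
	vm_flags_mod(vma, VM_MIXEDMAP | VM_DONTEXPAND, VM_PFNMAP);
	vma->vm_pgoff = 0;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

	return vm_map_pages(vma, pages, num_pages);
}
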
/linux-master/arch/x86/mm/fault.c
840 struct vm_area_struct *vma, u32 pkey, int si_code)
849 vma_end_read(vma);
855 struct vm_area_struct *vma)
865 if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE),
874 struct vm_area_struct *vma)
881 if (bad_area_access_from_pkeys(error_code, vma)) {
886 * the vma and passes it to userspace so userspace can discover
899 * 6. T1 : reaches here, sees vma_pkey(vma)=5, when we really
902 u32 pkey = vma_pkey(vma);
904 __bad_area(regs, error_code, address, mm, vma, pke
838 __bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address, struct mm_struct *mm, struct vm_area_struct *vma, u32 pkey, int si_code) argument
854 bad_area_access_from_pkeys(unsigned long error_code, struct vm_area_struct *vma) argument
872 bad_area_access_error(struct pt_regs *regs, unsigned long error_code, unsigned long address, struct mm_struct *mm, struct vm_area_struct *vma) argument
1053 access_error(unsigned long error_code, struct vm_area_struct *vma) argument
1215 struct vm_area_struct *vma; local
[all...]
/linux-master/drivers/infiniband/hw/qib/qib_file_ops.c
708 static int qib_mmap_mem(struct vm_area_struct *vma, struct qib_ctxtdata *rcd, argument
715 if ((vma->vm_end - vma->vm_start) > len) {
718 vma->vm_end - vma->vm_start, len);
728 if (vma->vm_flags & VM_WRITE) {
736 vm_flags_clear(vma, VM_MAYWRITE);
740 ret = remap_pfn_range(vma, vma->vm_start, pfn,
741 len, vma
750 mmap_ureg(struct vm_area_struct *vma, struct qib_devdata *dd, u64 ureg) argument
781 mmap_piobufs(struct vm_area_struct *vma, struct qib_devdata *dd, struct qib_ctxtdata *rcd, unsigned piobufs, unsigned piocnt) argument
826 mmap_rcvegrbufs(struct vm_area_struct *vma, struct qib_ctxtdata *rcd) argument
892 mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr, struct qib_ctxtdata *rcd, unsigned subctxt) argument
974 qib_mmapf(struct file *fp, struct vm_area_struct *vma) argument
[all...]
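
qib_mmap_mem() above illustrates the classic remap_pfn_range() driver mmap: reject requests larger than the backing buffer, pin read-only mappings by clearing VM_MAYWRITE (so a later mprotect(PROT_WRITE) fails too), then remap. A hedged sketch over a physically contiguous kernel buffer; kvaddr/len/writable are assumptions:

#include <linux/mm.h>
#include <linux/io.h>

static int mmap_mem_sketch(struct vm_area_struct *vma,
			   void *kvaddr, size_t len, bool writable)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	if (size > len)
		return -EINVAL;		/* asked for more than we have */

	if (!writable) {
		if (vma->vm_flags & VM_WRITE)
			return -EPERM;
		vm_flags_clear(vma, VM_MAYWRITE);
	}

	return remap_pfn_range(vma, vma->vm_start,
			       virt_to_phys(kvaddr) >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}
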
/linux-master/arch/sh/mm/fault.c
358 static inline int access_error(int error_code, struct vm_area_struct *vma) argument
362 if (unlikely(!(vma->vm_flags & VM_WRITE)))
369 !(vma->vm_flags & VM_EXEC)))
373 if (unlikely(!vma_is_accessible(vma)))
396 struct vm_area_struct * vma; local
442 vma = lock_mm_and_find_vma(mm, address, regs);
443 if (unlikely(!vma)) {
452 if (unlikely(access_error(error_code, vma))) {
469 fault = handle_mm_fault(vma, address, flags, regs);
/linux-master/arch/arm/kernel/process.c
336 #define is_gate_vma(vma) ((vma) == &gate_vma)
338 #define is_gate_vma(vma) 0
341 const char *arch_vma_name(struct vm_area_struct *vma) argument
343 return is_gate_vma(vma) ? "[vectors]" : NULL;
398 struct vm_area_struct *vma; local
421 vma = _install_special_mapping(mm, addr, PAGE_SIZE,
425 if (IS_ERR(vma)) {
426 ret = PTR_ERR(vma);
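
The ARM hit names the gate VMA "[vectors]" via arch_vma_name() and installs it with _install_special_mapping(), which creates a VMA backed by a fixed page array and tagged with a vm_special_mapping. A sketch of installing such a one-page mapping; the name, flags and page array are illustrative:

#include <linux/err.h>
#include <linux/mm.h>

static struct page *demo_pages[2];	/* [0] filled at init, [1] = NULL */

static const struct vm_special_mapping demo_mapping = {
	.name  = "[demo]",		/* shows up in /proc/pid/maps */
	.pages = demo_pages,
};

static int install_demo_page(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;

	vma = _install_special_mapping(mm, addr, PAGE_SIZE,
				       VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC,
				       &demo_mapping);

	return IS_ERR(vma) ? PTR_ERR(vma) : 0;
}
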
/linux-master/drivers/soc/aspeed/aspeed-p2a-ctrl.c
100 static int aspeed_p2a_mmap(struct file *file, struct vm_area_struct *vma) argument
110 vsize = vma->vm_end - vma->vm_start;
111 prot = vma->vm_page_prot;
113 if (vma->vm_pgoff + vma_pages(vma) > ctrl->mem_size >> PAGE_SHIFT)
119 if (remap_pfn_range(vma, vma->vm_start,
120 (ctrl->mem_base >> PAGE_SHIFT) + vma->vm_pgoff,
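
aspeed_p2a_mmap() above validates the requested window with vma->vm_pgoff + vma_pages(vma) against the region size in pages: the mapping's file offset plus its length must stay inside the backing region. The same check as a standalone helper (region_size is an assumption):

#include <linux/mm.h>

static bool mmap_fits_region(const struct vm_area_struct *vma,
			     resource_size_t region_size)
{
	/* [pgoff, pgoff + npages) must stay inside the region */
	return vma->vm_pgoff + vma_pages(vma) <= region_size >> PAGE_SHIFT;
}
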
/linux-master/drivers/platform/x86/intel/pmt/class.c
93 struct bin_attribute *attr, struct vm_area_struct *vma)
98 unsigned long vsize = vma->vm_end - vma->vm_start;
104 if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
113 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
114 if (io_remap_pfn_range(vma, vma->vm_start, pfn,
115 vsize, vma->vm_page_prot))
92 intel_pmt_mmap(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, struct vm_area_struct *vma) argument
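
intel_pmt_mmap() above is the read-only MMIO variant of the same idea: writable mappings are refused outright, the range is marked non-cacheable, and io_remap_pfn_range() maps the device PFNs. A hedged sketch, where base (the physical address of the region) is an assumption:

#include <linux/mm.h>
#include <linux/pfn.h>

static int mmio_mmap_sketch(struct vm_area_struct *vma, phys_addr_t base)
{
	unsigned long vsize = vma->vm_end - vma->vm_start;

	if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
		return -EPERM;		/* read-only region */

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	if (io_remap_pfn_range(vma, vma->vm_start, PHYS_PFN(base),
			       vsize, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}
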
/linux-master/arch/arm64/kernel/vdso.c
132 struct vm_area_struct *vma; local
137 for_each_vma(vmi, vma) {
138 if (vma_is_special_mapping(vma, vdso_info[VDSO_ABI_AA64].dm))
139 zap_vma_pages(vma);
141 if (vma_is_special_mapping(vma, vdso_info[VDSO_ABI_AA32].dm))
142 zap_vma_pages(vma);
152 struct vm_area_struct *vma, struct vm_fault *vmf)
154 struct page *timens_page = find_timens_vvar_page(vma);
182 return vmf_insert_pfn(vma, vmf->address, pfn);
151 vvar_fault(const struct vm_special_mapping *sm, struct vm_area_struct *vma, struct vm_fault *vmf) argument
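
The arm64 vDSO hit walks every VMA in an mm and zaps the pages of the vvar special mappings so they fault in fresh after a time-namespace change. The generic shape of that operation, as a hedged sketch:

#include <linux/mm.h>

static void zap_special_mapping(struct mm_struct *mm,
				const struct vm_special_mapping *sm)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_read_lock(mm);
	for_each_vma(vmi, vma) {
		if (vma_is_special_mapping(vma, sm))
			zap_vma_pages(vma);	/* next access re-faults */
	}
	mmap_read_unlock(mm);
}
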
/linux-master/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
270 * @vma: VMA to invalidate
282 struct xe_vma *vma)
289 xe_gt_assert(gt, vma);
304 u64 start = xe_vma_start(vma);
305 u64 length = xe_vma_size(vma);
318 start = ALIGN_DOWN(xe_vma_start(vma), align);
319 end = ALIGN(xe_vma_end(vma), align);
323 start = ALIGN_DOWN(xe_vma_start(vma), length);
332 start = ALIGN_DOWN(xe_vma_start(vma), length);
341 action[len++] = xe_vma_vm(vma)
280 xe_gt_tlb_invalidation_vma(struct xe_gt *gt, struct xe_gt_tlb_invalidation_fence *fence, struct xe_vma *vma) argument
[all...]
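
The Xe invalidation hit rounds its window outward so an aligned range still covers the whole VMA: the start is rounded down and the end rounded up to the required alignment. The arithmetic in isolation (align is whatever the hardware requires):

#include <linux/align.h>
#include <linux/types.h>

static void aligned_window(u64 addr, u64 size, u64 align,
			   u64 *start, u64 *end)
{
	*start = ALIGN_DOWN(addr, align);	/* round start down */
	*end = ALIGN(addr + size, align);	/* round end up */
}
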
/linux-master/drivers/gpu/drm/i915/display/intel_overlay.c
189 struct i915_vma *vma; member in struct:intel_overlay
289 struct i915_vma *vma)
296 if (vma)
297 frontbuffer = intel_frontbuffer_get(vma->obj);
309 overlay->old_vma = overlay->vma;
310 if (vma)
311 overlay->vma = i915_vma_get(vma);
313 overlay->vma = NULL;
318 struct i915_vma *vma,
288 intel_overlay_flip_prepare(struct intel_overlay *overlay, struct i915_vma *vma) argument
317 intel_overlay_continue(struct intel_overlay *overlay, struct i915_vma *vma, bool load_polyphase_filter) argument
358 struct i915_vma *vma; local
765 struct i915_vma *vma; local
797 struct i915_vma *vma; local
1355 struct i915_vma *vma; local
[all...]
/linux-master/arch/powerpc/include/asm/book3s/64/hash-4k.h
74 #define remap_4k_pfn(vma, addr, pfn, prot) \
75 remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, (prot))
148 extern pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma,
/linux-master/arch/m68k/include/asm/pgtable_mm.h
140 struct vm_area_struct *vma, unsigned long address,
145 #define update_mmu_cache(vma, addr, ptep) \
146 update_mmu_cache_range(NULL, vma, addr, ptep, 1)
139 update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma, unsigned long address, pte_t *ptep, unsigned int nr) argument
/linux-master/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
113 struct i915_vma *vma,
122 if (vma) {
123 err = i915_vma_pin_ww(vma, ww, obj->base.size, 0,
129 pr_err("Failed to pin vma.\n");
133 i915_vma_unpin(vma);
191 struct i915_vma *vma = NULL; local
204 vma = i915_vma_instance(obj, vm, NULL);
205 if (IS_ERR(vma)) {
206 err = PTR_ERR(vma);
235 if (!vma) {
111 lmem_pages_migrate_one(struct i915_gem_ww_ctx *ww, struct drm_i915_gem_object *obj, struct i915_vma *vma, bool silent_migrate) argument
[all...]
/linux-master/include/drm/drm_gem_dma_helper.h
43 int drm_gem_dma_mmap(struct drm_gem_dma_object *dma_obj, struct vm_area_struct *vma);
121 * @vma: VMA for the area to be mapped
129 static inline int drm_gem_dma_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) argument
133 return drm_gem_dma_mmap(dma_obj, vma);
/linux-master/arch/powerpc/mm/nohash/tlb.c
145 * - flush_tlb_page(vma, vmaddr) flushes one page
146 * - flush_tlb_range(vma, start, end) flushes a range of pages
181 void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr) argument
183 __local_flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
307 void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr) argument
310 if (vma && is_vm_hugetlb_page(vma))
311 flush_hugetlb_page(vma, vmaddr);
314 __flush_tlb_page(vma
345 flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) argument
[all...]
/linux-master/fs/9p/vfs_file.c
457 v9fs_file_mmap(struct file *filp, struct vm_area_struct *vma) argument
467 return generic_file_readonly_mmap(filp, vma);
470 retval = generic_file_mmap(filp, vma);
472 vma->vm_ops = &v9fs_mmap_file_vm_ops;
483 static void v9fs_mmap_vm_close(struct vm_area_struct *vma) argument
490 .range_start = (loff_t)vma->vm_pgoff * PAGE_SIZE,
492 .range_end = (loff_t)vma->vm_pgoff * PAGE_SIZE +
493 (vma->vm_end - vma->vm_start - 1),
496 if (!(vma
[all...]
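
The 9p close hook above computes, from the VMA alone, the inclusive byte range of the file it covers, and feeds that range to writeback. The conversion as a standalone helper:

#include <linux/mm.h>

static void vma_file_range(const struct vm_area_struct *vma,
			   loff_t *start, loff_t *end)
{
	*start = (loff_t)vma->vm_pgoff * PAGE_SIZE;		/* first byte */
	*end = *start + (vma->vm_end - vma->vm_start - 1);	/* last byte */
}
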
/linux-master/arch/arc/include/asm/pgtable-bits-arcv2.h
104 void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
107 #define update_mmu_cache(vma, addr, ptep) \
108 update_mmu_cache_range(NULL, vma, addr, ptep, 1)
/linux-master/arch/alpha/kernel/smp.c
661 struct vm_area_struct *vma; member in struct:flush_tlb_page_struct
673 flush_tlb_current_page(mm, data->vma, data->addr);
679 flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) argument
682 struct mm_struct *mm = vma->vm_mm;
687 flush_tlb_current_page(mm, vma, addr);
701 data.vma = vma;
712 flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) argument
715 flush_tlb_mm(vma->vm_mm);
730 flush_icache_user_page(struct vm_area_struct *vma, struc argument
[all...]
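
The alpha hit broadcasts a TLB flush by packing mm/vma/addr into a struct and handing it to every CPU. A sketch of that IPI pattern; local_flush_one() stands in for the per-CPU flush primitive and is hypothetical:

#include <linux/mm.h>
#include <linux/smp.h>

struct flush_args {
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr;
};

static void ipi_flush_one(void *info)
{
	struct flush_args *a = info;

	local_flush_one(a->mm, a->vma, a->addr);	/* hypothetical */
}

static void flush_page_everywhere(struct vm_area_struct *vma, unsigned long addr)
{
	struct flush_args a = {
		.vma = vma, .mm = vma->vm_mm, .addr = addr,
	};

	on_each_cpu(ipi_flush_one, &a, 1);	/* wait until all CPUs finish */
}
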
/linux-master/arch/powerpc/mm/cacheflush.c
206 if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
213 void flush_icache_user_page(struct vm_area_struct *vma, struct page *page, argument
/linux-master/arch/x86/kernel/cpu/sgx/encl.c
294 * VM_WRITE, VM_EXECUTE} in vma->vm_flags).
316 * @vma: VMA obtained from fault info from where page is accessed
327 static vm_fault_t sgx_encl_eaug_page(struct vm_area_struct *vma, argument
408 vmret = vmf_insert_pfn(vma, addr, PFN_DOWN(phys_addr));
433 struct vm_area_struct *vma = vmf->vma; local
439 encl = vma->vm_private_data;
458 return sgx_encl_eaug_page(vma, encl, addr);
462 entry = sgx_encl_load_page_in_vma(encl, addr, vma->vm_flags);
474 ret = vmf_insert_pfn(vma, add
487 sgx_vma_open(struct vm_area_struct *vma) argument
570 sgx_vma_mprotect(struct vm_area_struct *vma, unsigned long start, unsigned long end, unsigned long newflags) argument
628 sgx_vma_access(struct vm_area_struct *vma, unsigned long addr, void *buf, int len, int write) argument
1140 struct vm_area_struct *vma; local
1203 struct vm_area_struct *vma; local
[all...]
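
The SGX fault path above has the canonical .fault handler shape: take the VMA from the vm_fault, translate the faulting address into a PFN, and install it with vmf_insert_pfn(). A sketch, with resolve_pfn() a hypothetical lookup into the driver's private data:

#include <linux/mm.h>

static vm_fault_t demo_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned long pfn;

	if (resolve_pfn(vma->vm_private_data, vmf->address, &pfn))
		return VM_FAULT_SIGBUS;

	return vmf_insert_pfn(vma, vmf->address, pfn);
}

static const struct vm_operations_struct demo_vm_ops = {
	.fault = demo_fault,
};
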
/linux-master/drivers/gpu/drm/i915/gt/selftest_rps.c
70 struct i915_vma *vma; local
82 vma = i915_vma_instance(obj, vm, NULL);
83 if (IS_ERR(vma)) {
84 err = PTR_ERR(vma);
88 err = i915_vma_pin(vma, 0, 0, PIN_USER);
92 i915_vma_lock(vma);
126 *cs++ = lower_32_bits(i915_vma_offset(vma) + end * sizeof(*cs));
127 *cs++ = upper_32_bits(i915_vma_offset(vma) + end * sizeof(*cs));
132 *cs++ = lower_32_bits(i915_vma_offset(vma) + loop * sizeof(*cs));
133 *cs++ = upper_32_bits(i915_vma_offset(vma)
634 struct i915_vma *vma; local
773 struct i915_vma *vma; local
[all...]
/linux-master/mm/hmm.c
67 struct vm_area_struct *vma = walk->vma; local
74 if (!(vma->vm_flags & VM_WRITE))
80 if (handle_mm_fault(vma, addr, fault_flags, NULL) &
161 if (!walk->vma) {
300 if (!vm_normal_page(walk->vma, addr, pte) &&
418 spinlock_t *ptl = pud_trans_huge_lock(pudp, walk->vma);
475 struct vm_area_struct *vma = walk->vma; local
482 ptl = huge_pte_lock(hstate_vma(vma), wal
525 struct vm_area_struct *vma = walk->vma; local
[all...]

Completed in 506 milliseconds
