Searched refs:vma (Results 276 - 300 of 1003) sorted by relevance


/linux-master/drivers/misc/cxl/
context.c
128 struct vm_area_struct *vma = vmf->vma; local
129 struct cxl_context *ctx = vma->vm_file->private_data;
162 vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
168 ret = vmf_insert_pfn(vma, vmf->address, (area + offset) >> PAGE_SHIFT);
180 * Map a per-context mmio space into the given vma.
182 int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma) argument
184 u64 start = vma->vm_pgoff << PAGE_SHIFT;
185 u64 len = vma
[all...]
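
The cxl fault handler above follows the common driver pattern of resolving vmf->vma and inserting a physical page frame with vmf_insert_pfn(). A minimal sketch of that pattern, assuming a hypothetical device; my_dev_mmio_fault, MY_DEV_MMIO_BASE and my_dev_vm_ops are illustrative names, not cxl symbols:

#include <linux/mm.h>

/* Hypothetical MMIO window; a real driver would discover this at probe time. */
#define MY_DEV_MMIO_BASE	0xfe000000UL

static vm_fault_t my_dev_mmio_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned long offset = vmf->pgoff << PAGE_SHIFT;

	/* The mmap handler is assumed to have set VM_PFNMAP and vm_page_prot. */
	return vmf_insert_pfn(vma, vmf->address,
			      (MY_DEV_MMIO_BASE + offset) >> PAGE_SHIFT);
}

static const struct vm_operations_struct my_dev_vm_ops = {
	.fault = my_dev_mmio_fault,
};
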
/linux-master/drivers/soc/qcom/
rmtfs_mem.c
132 static int qcom_rmtfs_mem_mmap(struct file *filep, struct vm_area_struct *vma) argument
136 if (vma->vm_end - vma->vm_start > rmtfs_mem->size) {
139 vma->vm_end, vma->vm_start,
140 (vma->vm_end - vma->vm_start), &rmtfs_mem->size);
144 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
145 return remap_pfn_range(vma,
[all...]
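
qcom_rmtfs_mem_mmap() above shows the usual one-shot mmap handler: validate the requested size, adjust vm_page_prot, then remap the whole physical region. A hedged sketch of the same shape, where my_region_addr and my_region_size stand in for the driver's private state:

#include <linux/fs.h>
#include <linux/mm.h>

static phys_addr_t my_region_addr;	/* hypothetical, filled in at probe time */
static size_t my_region_size;

static int my_mem_mmap(struct file *filep, struct vm_area_struct *vma)
{
	unsigned long vsize = vma->vm_end - vma->vm_start;

	if (vsize > my_region_size)
		return -EINVAL;

	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start,
			       my_region_addr >> PAGE_SHIFT,
			       vsize, vma->vm_page_prot);
}
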
/linux-master/include/asm-generic/
tlb.h
280 extern void tlb_flush_rmaps(struct mmu_gather *tlb, struct vm_area_struct *vma);
294 static inline void tlb_flush_rmaps(struct mmu_gather *tlb, struct vm_area_struct *vma) { } argument
423 struct vm_area_struct vma = { local
429 flush_tlb_range(&vma, tlb->start, tlb->end);
437 tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) argument
450 tlb->vma_huge = is_vm_hugetlb_page(vma);
451 tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
452 tlb->vma_pfn = !!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP));
535 * In the case of tlb vma handling, we can optimise these away in the
539 static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) argument
550 tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) argument
[all...]
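
The lines around 423 above come from the generic fallback tlb_flush(): when an architecture only implements flush_tlb_range(), which needs a vma, the gather code fabricates a temporary vm_area_struct from the flags it cached earlier via tlb_update_vma_flags(). A condensed sketch of that trick; my_tlb_flush is an illustrative name, the real function lives in this header:

static inline void my_tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->end) {
		/* The original vma may be gone by flush time, so rebuild just
		 * enough of one for flush_tlb_range() from the cached flags. */
		struct vm_area_struct vma = {
			.vm_mm    = tlb->mm,
			.vm_flags = (tlb->vma_exec ? VM_EXEC    : 0) |
				    (tlb->vma_huge ? VM_HUGETLB : 0),
		};

		flush_tlb_range(&vma, tlb->start, tlb->end);
	}
}
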
/linux-master/mm/
mempolicy.c
384 * Per-vma policies are protected by mmap_lock. Allocations using per-task
411 * Rebind each vma in mm to new nodemask.
417 struct vm_area_struct *vma; local
421 for_each_vma(vmi, vma) {
422 vma_start_write(vma);
423 mpol_rebind_policy(vma->vm_policy, new);
520 !vma_migratable(walk->vma) ||
538 struct vm_area_struct *vma = walk->vma; local
546 ptl = pmd_trans_huge_lock(pmd, vma);
671 change_prot_numa(struct vm_area_struct *vma, unsigned long addr, unsigned long end) argument
692 struct vm_area_struct *next, *vma = walk->vma; local
788 vma_replace_policy(struct vm_area_struct *vma, struct mempolicy *pol) argument
818 mbind_range(struct vma_iterator *vmi, struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end, struct mempolicy *new_pol) argument
931 struct vm_area_struct *vma = NULL; local
1066 struct vm_area_struct *vma; local
1267 struct vm_area_struct *vma, *prev; local
1539 struct vm_area_struct *vma, *prev; local
1766 vma_migratable(struct vm_area_struct *vma) argument
1794 __get_vma_policy(struct vm_area_struct *vma, unsigned long addr, pgoff_t *ilx) argument
1817 get_vma_policy(struct vm_area_struct *vma, unsigned long addr, int order, pgoff_t *ilx) argument
1833 vma_policy_mof(struct vm_area_struct *vma) argument
2102 huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags, struct mempolicy **mpol, nodemask_t **nodemask) argument
2295 vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma, unsigned long addr, bool hugepage) argument
2731 mpol_misplaced(struct folio *folio, struct vm_area_struct *vma, unsigned long addr) argument
2979 mpol_set_shared_policy(struct shared_policy *sp, struct vm_area_struct *vma, struct mempolicy *pol) argument
[all...]
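
Several of the mempolicy matches above (for example mpol_rebind_mm() at 417-423) walk every vma in an mm with the VMA iterator and write-lock each one before touching its per-vma state. A hedged sketch of that walk, with the actual policy update reduced to a comment; my_rebind_all is an illustrative name:

#include <linux/mm.h>

static void my_rebind_all(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_write_lock(mm);
	for_each_vma(vmi, vma) {
		/* Per-vma state such as vma->vm_policy may only be changed
		 * with the vma write-locked. */
		vma_start_write(vma);
		/* ... update vma->vm_policy here ... */
	}
	mmap_write_unlock(mm);
}
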
mapping_dirty_helpers.c
41 pte_t old_pte = ptep_modify_prot_start(walk->vma, addr, pte);
44 ptep_modify_prot_commit(walk->vma, addr, pte, old_pte, ptent);
97 pgoff_t pgoff = ((addr - walk->vma->vm_start) >> PAGE_SHIFT) +
98 walk->vma->vm_pgoff - cwalk->bitmap_pgoff;
99 pte_t old_pte = ptep_modify_prot_start(walk->vma, addr, pte);
102 ptep_modify_prot_commit(walk->vma, addr, pte, old_pte, ptent);
180 flush_cache_range(walk->vma, start, end);
203 flush_tlb_range(walk->vma, wpwalk->range.start,
206 flush_tlb_range(walk->vma, wpwalk->tlbflush_start,
221 unsigned long vm_flags = READ_ONCE(walk->vma
[all...]
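
Lines 41-44 and 97-102 above use the ptep_modify_prot_start()/ptep_modify_prot_commit() transaction to rewrite a live PTE. A minimal sketch of that pairing, here write-protecting a single PTE; my_wrprotect_pte is illustrative, and the caller is assumed to hold the PTE lock:

static void my_wrprotect_pte(struct vm_area_struct *vma, unsigned long addr,
			     pte_t *pte)
{
	/* Start the transaction: the old value is returned and the entry
	 * may be temporarily cleared, depending on the architecture. */
	pte_t old_pte = ptep_modify_prot_start(vma, addr, pte);
	pte_t ptent = pte_wrprotect(old_pte);

	/* Publish the modified value and close the transaction. */
	ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent);
}
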
/linux-master/drivers/gpu/drm/i915/gt/
intel_ring_submission.c
64 struct drm_i915_gem_object *obj = engine->status_page.vma->obj;
139 set_hwsp(engine, i915_ggtt_offset(engine->status_page.vma));
223 ENGINE_WRITE_FW(engine, RING_START, i915_ggtt_offset(ring->vma));
271 i915_ggtt_offset(ring->vma));
531 struct i915_vma *vma; local
556 vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
557 if (IS_ERR(vma)) {
558 err = PTR_ERR(vma);
562 return vma;
580 struct i915_vma *vma; local
1243 gen7_ctx_switch_bb_setup(struct intel_engine_cs * const engine, struct i915_vma * const vma) argument
1249 gen7_ctx_switch_bb_init(struct intel_engine_cs *engine, struct i915_gem_ww_ctx *ww, struct i915_vma *vma) argument
1278 struct i915_vma *vma; local
[all...]
/linux-master/fs/
coredump.c
1041 static bool always_dump_vma(struct vm_area_struct *vma) argument
1044 if (vma == get_gate_vma(vma->vm_mm))
1051 if (vma->vm_ops && vma->vm_ops->name && vma->vm_ops->name(vma))
1058 if (arch_vma_name(vma))
1067 * Decide how much of @vma's contents should be included in a core dump.
1069 static unsigned long vma_dump_size(struct vm_area_struct *vma, argument
1152 coredump_next_vma(struct vma_iterator *vmi, struct vm_area_struct *vma, struct vm_area_struct *gate_vma) argument
1185 struct vm_area_struct *gate_vma, *vma = NULL; local
[all...]
/linux-master/drivers/gpu/drm/i915/gem/
i915_gem_object.c
104 spin_lock_init(&obj->vma.lock);
105 INIT_LIST_HEAD(&obj->vma.list);
272 struct i915_vma *vma; local
276 * vma, in the same fd namespace, by virtue of flink/open.
280 vma = radix_tree_delete(&ctx->handles_vma, lut->handle);
281 if (vma) {
282 GEM_BUG_ON(vma->obj != obj);
283 GEM_BUG_ON(!atomic_read(&vma->open_count));
284 i915_vma_close(vma);
346 if (!list_empty(&obj->vma
347 struct i915_vma *vma; local
589 struct i915_vma *vma; local
[all...]
/linux-master/arch/xtensa/mm/
cache.c
102 unsigned long vaddr, struct vm_area_struct *vma)
182 void local_flush_cache_range(struct vm_area_struct *vma, argument
197 void local_flush_cache_page(struct vm_area_struct *vma, unsigned long address, argument
214 void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma, argument
228 flush_tlb_page(vma, addr + i * PAGE_SIZE);
252 && (vma->vm_flags & VM_EXEC) != 0) {
271 void copy_to_user_page(struct vm_area_struct *vma, struct page *page, argument
301 if ((vma->vm_flags & VM_EXEC) != 0)
305 } else if ((vma->vm_flags & VM_EXEC) != 0) {
311 extern void copy_from_user_page(struct vm_area_struct *vma, struc argument
101 copy_user_highpage(struct page *dst, struct page *src, unsigned long vaddr, struct vm_area_struct *vma) argument
[all...]
/linux-master/arch/loongarch/mm/
tlb.c
57 void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, argument
60 struct mm_struct *mm = vma->vm_mm;
113 void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) argument
117 if (asid_valid(vma->vm_mm, cpu)) {
120 newpid = cpu_asid(cpu, vma->vm_mm);
124 cpumask_clear_cpu(cpu, mm_cpumask(vma->vm_mm));
138 static void __update_hugetlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) argument
166 void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) argument
177 if (current->active_mm != vma->vm_mm)
181 __update_hugetlb(vma, addres
[all...]
/linux-master/drivers/gpu/drm/gma500/
fbdev.c
25 struct vm_area_struct *vma = vmf->vma; local
26 struct fb_info *info = vma->vm_private_data;
30 unsigned long page_num = vma_pages(vma);
33 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
36 err = vmf_insert_mixed(vma, address, __pfn_to_pfn_t(pfn, PFN_DEV));
96 static int psb_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma) argument
98 if (vma->vm_pgoff != 0)
100 if (vma
[all...]
/linux-master/drivers/video/fbdev/
sbuslib.c
45 struct vm_area_struct *vma)
52 if (!(vma->vm_flags & (VM_SHARED | VM_MAYSHARE)))
55 size = vma->vm_end - vma->vm_start;
56 if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
59 off = vma->vm_pgoff << PAGE_SHIFT;
63 vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
64 vma->vm_page_prot = pgprot_noncached(vma
41 sbusfb_mmap_helper(struct sbus_mmap_map *map, unsigned long physbase, unsigned long fbsize, unsigned long iospace, struct vm_area_struct *vma) argument
[all...]
/linux-master/drivers/gpu/drm/i915/display/
intel_dpt.c
20 struct i915_vma *vma; member in struct:i915_dpt
129 struct i915_vma *vma; local
146 vma = i915_gem_object_ggtt_pin_ww(dpt->obj, &ww, NULL, 0, 4096,
148 if (IS_ERR(vma)) {
149 err = PTR_ERR(vma);
153 iomem = i915_vma_pin_iomap(vma);
154 i915_vma_unpin(vma);
161 dpt->vma = vma;
164 i915_vma_get(vma);
[all...]
/linux-master/arch/powerpc/platforms/powernv/
H A Dopal-prd.c110 * @vma: VMA to map the registers into
113 static int opal_prd_mmap(struct file *file, struct vm_area_struct *vma) argument
119 vma->vm_start, vma->vm_end, vma->vm_pgoff,
120 vma->vm_flags);
122 addr = vma->vm_pgoff << PAGE_SHIFT;
123 size = vma->vm_end - vma->vm_start;
129 page_prot = phys_mem_access_prot(file, vma
[all...]
/linux-master/drivers/char/
mem.c
325 static inline int private_mapping_ok(struct vm_area_struct *vma) argument
327 return is_nommu_shared_mapping(vma->vm_flags);
331 static inline int private_mapping_ok(struct vm_area_struct *vma) argument
343 static int mmap_mem(struct file *file, struct vm_area_struct *vma) argument
345 size_t size = vma->vm_end - vma->vm_start;
346 phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
349 if (offset >> PAGE_SHIFT != vma->vm_pgoff)
356 if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
359 if (!private_mapping_ok(vma))
520 mmap_zero(struct file *file, struct vm_area_struct *vma) argument
[all...]
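
mmap_mem() above (lines 343-359) is the /dev/mem-style validation sequence: derive the physical offset from vm_pgoff and reject requests that overflow or fall outside the allowed range. A sketch of just the arithmetic checks, with the range and permission checks left as comments; my_phys_mmap_checks is an illustrative name:

static int my_phys_mmap_checks(struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;
	phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;

	/* If the shift overflowed phys_addr_t, shifting back will not
	 * reproduce vm_pgoff. */
	if (offset >> PAGE_SHIFT != vma->vm_pgoff)
		return -EINVAL;

	/* Reject mappings that wrap past the end of the physical
	 * address space. */
	if (offset + (phys_addr_t)size - 1 < offset)
		return -EINVAL;

	/* valid_mmap_phys_addr_range() and private_mapping_ok() checks
	 * would follow here, as in mmap_mem() above. */
	return 0;
}
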
/linux-master/arch/xtensa/kernel/
smp.c
471 struct vm_area_struct *vma; member in struct:flush_data
499 local_flush_tlb_page(fd->vma, fd->addr1);
502 void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) argument
505 .vma = vma,
514 local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
517 void flush_tlb_range(struct vm_area_struct *vma, argument
521 .vma = vma,
558 local_flush_cache_page(fd->vma, f
561 flush_cache_page(struct vm_area_struct *vma, unsigned long address, unsigned long pfn) argument
578 flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) argument
[all...]
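
The xtensa smp.c matches show the common SMP broadcast idiom: pack the vma and address range into a small on-stack struct and run the local flush on every CPU with on_each_cpu(). A hedged sketch of that idiom; the my_* names are illustrative:

struct my_flush_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void my_ipi_flush_tlb_range(void *arg)
{
	struct my_flush_data *fd = arg;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

static void my_flush_tlb_range(struct vm_area_struct *vma,
			       unsigned long start, unsigned long end)
{
	struct my_flush_data fd = {
		.vma   = vma,
		.addr1 = start,
		.addr2 = end,
	};

	/* Run the local flush on every online CPU and wait for completion. */
	on_each_cpu(my_ipi_flush_tlb_range, &fd, 1);
}
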
/linux-master/arch/loongarch/kernel/
smp.c
624 struct vm_area_struct *vma; member in struct:flush_tlb_data
633 local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
636 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) argument
638 struct mm_struct *mm = vma->vm_mm;
643 .vma = vma,
656 local_flush_tlb_range(vma, start, end);
682 local_flush_tlb_page(fd->vma, fd->addr1);
685 void flush_tlb_page(struct vm_area_struct *vma, unsigned long page) argument
688 if ((atomic_read(&vma
[all...]
/linux-master/arch/powerpc/mm/book3s64/
hugetlbpage.c
128 pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, argument
137 pte_val = pte_update(vma->vm_mm, addr, ptep,
143 void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, argument
149 return radix__huge_ptep_modify_prot_commit(vma, addr, ptep,
152 psize = huge_page_size(hstate_vma(vma));
153 set_huge_pte_at(vma->vm_mm, addr, ptep, pte, psize);
/linux-master/tools/testing/selftests/bpf/progs/
lsm.c
89 int BPF_PROG(test_int_hook, struct vm_area_struct *vma, argument
98 is_stack = (vma->vm_start <= vma->vm_mm->start_stack &&
99 vma->vm_end >= vma->vm_mm->start_stack);
121 bpf_copy_from_user(args, sizeof(args), (void *)bprm->vma->vm_mm->arg_start);
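
The BPF selftest above attaches to the file_mprotect LSM hook and tests whether the vma being changed covers the process stack. A hedged sketch of the same check as a standalone CO-RE program; check_mprotect is an illustrative program name, and the includes follow the selftest conventions:

#include "vmlinux.h"
#include <errno.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

SEC("lsm/file_mprotect")
int BPF_PROG(check_mprotect, struct vm_area_struct *vma,
	     unsigned long reqprot, unsigned long prot, int ret)
{
	unsigned long stack = vma->vm_mm->start_stack;
	bool is_stack = vma->vm_start <= stack && vma->vm_end >= stack;

	/* Honour a denial from an earlier program in the chain. */
	if (ret)
		return ret;

	/* A negative return from an LSM program denies the operation. */
	return is_stack ? -EPERM : 0;
}
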
/linux-master/arch/powerpc/include/asm/
pgtable.h
49 #define update_mmu_cache(vma, addr, ptep) \
50 update_mmu_cache_range(NULL, vma, addr, ptep, 1)
118 int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
132 void __update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
144 struct vm_area_struct *vma, unsigned long address,
149 __update_mmu_cache(vma, address, ptep);
143 update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma, unsigned long address, pte_t *ptep, unsigned int nr) argument
/linux-master/drivers/gpu/drm/nouveau/
nouveau_gem.c
44 struct vm_area_struct *vma = vmf->vma; local
45 struct ttm_buffer_object *bo = vma->vm_private_data;
58 prot = vm_get_page_prot(vma->vm_flags);
108 struct nouveau_vma *vma; local
130 ret = nouveau_vma_new(nvbo, vmm, &vma);
142 struct nouveau_vma *vma; member in struct:nouveau_gem_object_unmap
146 nouveau_gem_object_delete(struct nouveau_vma *vma) argument
148 nouveau_fence_unref(&vma->fence);
149 nouveau_vma_del(&vma);
162 nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma) argument
193 struct nouveau_vma *vma; local
308 struct nouveau_vma *vma; local
429 struct nouveau_vma *vma = local
525 struct nouveau_vma *vma = nouveau_vma_find(nvbo, vmm); local
863 struct nouveau_vma *vma = (void *)(unsigned long) local
[all...]
/linux-master/drivers/gpu/drm/
drm_gem_dma_helper.c
513 * @vma: VMA for the area to be mapped
522 int drm_gem_dma_mmap(struct drm_gem_dma_object *dma_obj, struct vm_area_struct *vma) argument
532 vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
533 vm_flags_mod(vma, VM_DONTEXPAND, VM_PFNMAP);
536 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
539 vma, vma->vm_end - vma->vm_start,
542 ret = dma_mmap_wc(dma_obj->base.dev->dev, vma, dma_ob
[all...]
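
drm_gem_dma_mmap() above strips the fake GEM offset out of vm_pgoff and then lets dma_mmap_wc() map the coherent buffer. A hedged sketch of that final step for a non-DRM driver that already owns a coherent allocation; my_dma_buf_mmap and its parameters are illustrative:

#include <linux/dma-mapping.h>

static int my_dma_buf_mmap(struct device *dev, void *cpu_addr,
			   dma_addr_t dma_addr, struct vm_area_struct *vma)
{
	/* DRM encodes the object in vm_pgoff; a plain driver maps from 0. */
	vma->vm_pgoff = 0;

	return dma_mmap_wc(dev, vma, cpu_addr, dma_addr,
			   vma->vm_end - vma->vm_start);
}
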
/linux-master/arch/sparc/mm/
fault_32.c
115 struct vm_area_struct *vma; local
152 vma = lock_mm_and_find_vma(mm, address, regs);
153 if (!vma)
161 if (!(vma->vm_flags & VM_WRITE))
165 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
179 fault = handle_mm_fault(vma, address, flags, regs);
307 struct vm_area_struct *vma; local
315 vma = lock_mm_and_find_vma(mm, address, NULL);
316 if (!vma)
320 if (!(vma
[all...]
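
fault_32.c above is the classic architecture fault path: find and lock the vma covering the faulting address, check that its vm_flags allow the access, then hand off to handle_mm_fault(). A condensed sketch using the generic lock_mm_and_find_vma() helper, with signal delivery and retry handling reduced to comments; my_do_page_fault is an illustrative name:

static void my_do_page_fault(struct pt_regs *regs, unsigned long address,
			     bool is_write)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned int flags = FAULT_FLAG_DEFAULT;
	vm_fault_t fault;

	vma = lock_mm_and_find_vma(mm, address, regs);
	if (!vma)
		return;		/* no usable mapping: deliver SIGSEGV */

	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_access;
		flags |= FAULT_FLAG_WRITE;
	} else if (!(vma->vm_flags & (VM_READ | VM_EXEC))) {
		goto bad_access;
	}

	fault = handle_mm_fault(vma, address, flags, regs);

	/* Retry and error handling elided; COMPLETED and RETRY both mean
	 * the mmap lock has already been dropped for us. */
	if (!(fault & (VM_FAULT_COMPLETED | VM_FAULT_RETRY)))
		mmap_read_unlock(mm);
	return;

bad_access:
	mmap_read_unlock(mm);
	/* deliver SIGSEGV with SEGV_ACCERR */
}
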
/linux-master/include/linux/
pagewalk.h
11 /* mmap_lock should be locked for read to stabilize the vma tree */
13 /* vma will be write-locked during the walk */
15 /* vma is expected to be already write-locked during the walk */
35 * function is called with the vma lock held, in order to
38 * and retake the vma lock in order to avoid deadlocks
41 * ptl after dropping the vma lock, or else revalidate
42 * those items after re-acquiring the vma lock and before
45 * we walk over the current vma or not. Returning 0 means
46 * "do page table walk over the current vma", returning
48 * right now" and returning 1 means "skip the current vma"
111 struct vm_area_struct *vma; member in struct:mm_walk
[all...]
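
The pagewalk.h comments above describe the locking rules for mm_walk; in use, a caller fills in mm_walk_ops and calls walk_page_range(), and each callback sees the current vma through walk->vma. A hedged sketch that counts present PTEs in anonymous vmas; all my_* names are illustrative:

#include <linux/pagewalk.h>

static int my_count_pte(pte_t *pte, unsigned long addr,
			unsigned long next, struct mm_walk *walk)
{
	unsigned long *count = walk->private;

	/* walk->vma is the vma currently being walked. */
	if (!vma_is_anonymous(walk->vma))
		return 0;

	if (pte_present(ptep_get(pte)))
		(*count)++;

	return 0;
}

static const struct mm_walk_ops my_walk_ops = {
	.pte_entry = my_count_pte,
};

static unsigned long my_count_present(struct mm_struct *mm,
				      unsigned long start, unsigned long end)
{
	unsigned long count = 0;

	mmap_read_lock(mm);
	walk_page_range(mm, start, end, &my_walk_ops, &count);
	mmap_read_unlock(mm);

	return count;
}
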
/linux-master/arch/microblaze/include/asm/
cacheflush.h
85 #define flush_cache_page(vma, vmaddr, pfn) \
88 static inline void copy_to_user_page(struct vm_area_struct *vma, argument
94 if (vma->vm_flags & VM_EXEC) {

