Searched refs:vma (Results 251 - 275 of 1003) sorted by relevance


/linux-master/drivers/gpu/drm/i915/selftests/
igt_spinner.c:45 unsigned int mode, struct i915_vma **vma)
50 *vma = i915_vma_instance(obj, ce->vm, NULL);
51 if (IS_ERR(*vma))
52 return ERR_CAST(*vma);
67 ret = i915_vma_pin_ww(*vma, ww, 0, 0, PIN_USER);
69 ret = i915_vma_pin(*vma, 0, 0, PIN_USER);
129 struct i915_vma *hws, *vma; local
146 vma = spin->batch_vma;
152 err = igt_vma_move_to_active_unlocked(vma, rq, 0);
193 *batch++ = lower_32_bits(i915_vma_offset(vma));
42 igt_spinner_pin_obj(struct intel_context *ce, struct i915_gem_ww_ctx *ww, struct drm_i915_gem_object *obj, unsigned int mode, struct i915_vma **vma) argument
[all...]
i915_gem_evict.c:55 struct i915_vma *vma; local
62 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
63 if (IS_ERR(vma)) {
65 if (vma == ERR_PTR(-ENOSPC))
68 return PTR_ERR(vma);
87 struct i915_vma *vma; local
89 list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
90 if (i915_gem_object_has_tiling_quirk(vma->obj))
91 i915_vma_unpin(vma);
158 struct i915_vma *vma; local
252 struct i915_vma *vma; local
[all...]
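A note on the pattern in the two selftests above: both follow the same lookup/pin/use/unpin lifecycle for a vma. A minimal sketch, assuming an existing object 'obj' and context 'ce' (placeholders here, not taken from the hits above):

	struct i915_vma *vma;
	int err;

	/* Look up (or create) the binding of obj into this VM. */
	vma = i915_vma_instance(obj, ce->vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	/* Pin it so its GPU address stays valid while in use. */
	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		return err;

	/* ... emit commands against i915_vma_offset(vma) ... */

	i915_vma_unpin(vma);
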
/linux-master/samples/ftrace/
ftrace-direct-too.c:10 extern void my_direct_func(struct vm_area_struct *vma, unsigned long address,
13 void my_direct_func(struct vm_area_struct *vma, unsigned long address, argument
16 trace_printk("handle mm fault vma=%p address=%lx flags=%x regs=%p\n",
17 vma, address, flags, regs);
/linux-master/arch/loongarch/include/asm/
cacheflush.h:43 #define flush_cache_range(vma, start, end) do { } while (0)
44 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
47 #define flush_icache_user_page(vma, page, addr, len) do { } while (0)
/linux-master/arch/arm64/include/asm/
tlbflush.h:203 * flush_tlb_range(vma, start, end)
205 * CPUs for the user address space corresponding to 'vma->mm'.
216 * flush_tlb_page(vma, addr)
218 * address space corresponding to 'vma->mm'. Note that this
234 * __flush_tlb_range(vma, start, end, stride, last_level, tlb_level)
236 * CPUs for the user address space corresponding to 'vma->mm'.
291 static inline void flush_tlb_page_nosync(struct vm_area_struct *vma, argument
294 return __flush_tlb_page_nosync(vma->vm_mm, uaddr);
297 static inline void flush_tlb_page(struct vm_area_struct *vma, argument
300 flush_tlb_page_nosync(vma, uadd
427 __flush_tlb_range_nosync(struct vm_area_struct *vma, unsigned long start, unsigned long end, unsigned long stride, bool last_level, int tlb_level) argument
464 __flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end, unsigned long stride, bool last_level, int tlb_level) argument
474 flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) argument
[all...]
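The comments above give the contract: once a PTE in a user address space changes, any stale TLB entry must be invalidated on every CPU that may cache translations for 'vma->vm_mm'. A minimal sketch of that pairing (the function name is illustrative, not from the source):

	static void my_update_user_pte(struct vm_area_struct *vma,
				       unsigned long addr, pte_t *ptep, pte_t pte)
	{
		set_pte_at(vma->vm_mm, addr, ptep, pte);
		/* Broadcast-invalidate the single stale entry. */
		flush_tlb_page(vma, addr);
	}
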
/linux-master/arch/powerpc/kernel/
vdso.c:85 struct vm_area_struct *vma, struct vm_fault *vmf);
119 struct vm_area_struct *vma; local
122 for_each_vma(vmi, vma) {
123 if (vma_is_special_mapping(vma, &vvar_spec))
124 zap_vma_pages(vma);
133 struct vm_area_struct *vma, struct vm_fault *vmf)
135 struct page *timens_page = find_timens_vvar_page(vma);
163 return vmf_insert_pfn(vma, vmf->address, pfn);
167 * This is called from binfmt_elf, we create the special vma for the
176 struct vm_area_struct *vma; local
132 vvar_fault(const struct vm_special_mapping *sm, struct vm_area_struct *vma, struct vm_fault *vmf) argument
[all...]
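The vvar_fault() handler above resolves a fault inside the special mapping by inserting a pfn directly, rather than backing the area with struct pages. A hedged sketch of that shape (MY_VVAR_PHYS is a placeholder, not a real symbol):

	static vm_fault_t my_vvar_fault(const struct vm_special_mapping *sm,
					struct vm_area_struct *vma,
					struct vm_fault *vmf)
	{
		unsigned long pfn = MY_VVAR_PHYS >> PAGE_SHIFT; /* placeholder */

		return vmf_insert_pfn(vma, vmf->address, pfn);
	}
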
/linux-master/arch/sh/kernel/
smp.c:376 struct vm_area_struct *vma; member in struct:flush_tlb_data
385 local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
388 void flush_tlb_range(struct vm_area_struct *vma, argument
391 struct mm_struct *mm = vma->vm_mm;
397 fd.vma = vma;
407 local_flush_tlb_range(vma, start, end);
431 local_flush_tlb_page(fd->vma, fd->addr1);
434 void flush_tlb_page(struct vm_area_struct *vma, unsigned long page) argument
437 if ((atomic_read(&vma
[all...]
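The pattern above (the MIPS smp.c hit later on this page is the same) bundles the arguments into a flush_tlb_data struct so the purely local flush can be replayed on every CPU. A sketch of that shape:

	struct flush_tlb_data {
		struct vm_area_struct *vma;
		unsigned long addr1;
		unsigned long addr2;
	};

	static void flush_tlb_range_ipi(void *info)
	{
		struct flush_tlb_data *fd = info;

		local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
	}

	/* caller fills in fd, then: on_each_cpu(flush_tlb_range_ipi, &fd, 1); */
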
/linux-master/arch/sh/mm/
cache.c:60 void copy_to_user_page(struct vm_area_struct *vma, struct page *page, argument
77 if (vma->vm_flags & VM_EXEC)
78 flush_cache_page(vma, vaddr, page_to_pfn(page));
81 void copy_from_user_page(struct vm_area_struct *vma, struct page *page, argument
100 unsigned long vaddr, struct vm_area_struct *vma)
119 (vma->vm_flags & VM_EXEC))
141 void __update_cache(struct vm_area_struct *vma, argument
200 void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, argument
205 data.vma = vma;
99 copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr, struct vm_area_struct *vma) argument
212 flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) argument
243 flush_icache_pages(struct vm_area_struct *vma, struct page *page, unsigned int nr) argument
[all...]
/linux-master/mm/
ksm.c:629 page = vm_normal_page(walk->vma, addr, ptent);
660 * We take great care only to touch a ksm page, in a VM_MERGEABLE vma,
666 * of the process that owns 'vma'. We also do not want to enforce
669 static int break_ksm(struct vm_area_struct *vma, unsigned long addr, bool lock_vma) argument
679 ksm_page = walk_page_range_vma(vma, addr, addr + 1, ops, NULL);
684 ret = handle_mm_fault(vma, addr,
716 static bool vma_ksm_compatible(struct vm_area_struct *vma) argument
718 if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE | VM_PFNMAP |
723 if (vma_is_dax(vma))
727 if (vma
741 struct vm_area_struct *vma; local
754 struct vm_area_struct *vma; local
773 struct vm_area_struct *vma; local
1070 unmerge_ksm_pages(struct vm_area_struct *vma, unsigned long start, unsigned long end, bool lock_vma) argument
1202 struct vm_area_struct *vma; local
1278 write_protect_page(struct vm_area_struct *vma, struct page *page, pte_t *orig_pte) argument
1370 replace_page(struct vm_area_struct *vma, struct page *page, struct page *kpage, pte_t orig_pte) argument
1475 try_to_merge_one_page(struct vm_area_struct *vma, struct page *page, struct page *kpage) argument
1544 struct vm_area_struct *vma; local
2377 struct vm_area_struct *vma; local
2567 struct vm_area_struct *vma; local
2796 __ksm_add_vma(struct vm_area_struct *vma) argument
2807 __ksm_del_vma(struct vm_area_struct *vma) argument
2828 ksm_add_vma(struct vm_area_struct *vma) argument
2838 struct vm_area_struct *vma; local
2847 struct vm_area_struct *vma; local
2926 ksm_madvise(struct vm_area_struct *vma, unsigned long start, unsigned long end, int advice, unsigned long *vm_flags) argument
3052 ksm_might_need_to_copy(struct folio *folio, struct vm_area_struct *vma, unsigned long addr) argument
3122 struct vm_area_struct *vma; local
3181 struct vm_area_struct *vma; local
[all...]
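vma_ksm_compatible() above gates which mappings KSM will consider; the userspace opt-in that leads into this code is madvise(MADV_MERGEABLE). A minimal userspace sketch:

	#include <sys/mman.h>

	size_t len = 4 << 20;	/* arbitrary example size */
	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf != MAP_FAILED)
		madvise(buf, len, MADV_MERGEABLE);	/* candidate for merging */
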
khugepaged.c:83 * it would have happened if the vma was large enough during page
349 int hugepage_madvise(struct vm_area_struct *vma, argument
360 if (mm_has_pgste(vma->vm_mm))
366 * If the vma become good for khugepaged to scan,
370 khugepaged_enter_vma(vma, *vm_flags);
377 * this vma even if we leave the mm registered in khugepaged if
451 void khugepaged_enter_vma(struct vm_area_struct *vma, argument
454 if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
456 if (thp_vma_allowable_order(vma, vm_flags, false, false, true,
458 __khugepaged_enter(vma
541 __collapse_huge_page_isolate(struct vm_area_struct *vma, unsigned long address, pte_t *pte, struct collapse_control *cc, struct list_head *compound_pagelist) argument
686 __collapse_huge_page_copy_succeeded(pte_t *pte, struct vm_area_struct *vma, unsigned long address, spinlock_t *ptl, struct list_head *compound_pagelist) argument
739 __collapse_huge_page_copy_failed(pte_t *pte, pmd_t *pmd, pmd_t orig_pmd, struct vm_area_struct *vma, struct list_head *compound_pagelist) argument
778 __collapse_huge_page_copy(pte_t *pte, struct page *page, pmd_t *pmd, pmd_t orig_pmd, struct vm_area_struct *vma, unsigned long address, spinlock_t *ptl, struct list_head *compound_pagelist) argument
919 struct vm_area_struct *vma; local
990 __collapse_huge_page_swapin(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd, int referenced) argument
1099 struct vm_area_struct *vma; local
1251 hpage_collapse_scan_pmd(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, bool *mmap_locked, struct collapse_control *cc) argument
1448 set_huge_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmdp, struct page *hpage) argument
1486 struct vm_area_struct *vma = vma_lookup(mm, haddr); local
1688 struct vm_area_struct *vma; local
2072 struct vm_area_struct *vma; local
2338 struct vm_area_struct *vma; variable in typeref:struct:vm_area_struct
2703 madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end) argument
[all...]
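hugepage_madvise() and madvise_collapse() above are the kernel side of two userspace hints: MADV_HUGEPAGE registers a range with khugepaged for background collapse, while MADV_COLLAPSE (Linux 6.1+) requests a synchronous collapse. Reusing 'buf' and 'len' from the previous sketch:

	madvise(buf, len, MADV_HUGEPAGE);	/* register with khugepaged */
	madvise(buf, len, MADV_COLLAPSE);	/* synchronous collapse, 6.1+ */
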
/linux-master/scripts/
extract-sys-certs.pl:39 my $vma = Math::BigInt->new("0x" . $4);
45 vma => $vma,
109 my $s_vma = $sec->{vma};
143 my $foff = $start - $s->{vma} + $s->{foff};
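The Perl above rebases an address against a section's load address (vma) and file offset (foff) to find where the bytes live in the file. The same arithmetic written out in C for clarity:

	/* file_offset = address - section_vma + section_file_offset */
	unsigned long vma_to_foff(unsigned long addr,
				  unsigned long s_vma, unsigned long s_foff)
	{
		return addr - s_vma + s_foff;
	}
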
/linux-master/arch/powerpc/include/asm/
pkeys.h:38 static inline int vma_pkey(struct vm_area_struct *vma) argument
42 return (vma->vm_flags & ARCH_VM_PKEY_FLAGS) >> VM_PKEY_SHIFT;
128 extern int __arch_override_mprotect_pkey(struct vm_area_struct *vma,
130 static inline int arch_override_mprotect_pkey(struct vm_area_struct *vma, argument
143 return __arch_override_mprotect_pkey(vma, prot, pkey);
/linux-master/arch/s390/include/asm/
hugetlb.h:57 static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma, argument
60 return huge_ptep_get_and_clear(vma->vm_mm, address, ptep);
63 static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, argument
69 huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
70 __set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
/linux-master/arch/powerpc/kvm/
book3s_hv_uvmem.c:398 struct vm_area_struct *vma; local
408 vma = find_vma_intersection(kvm->mm, start, end);
409 if (!vma) {
413 vma_start_write(vma);
415 vm_flags = vma->vm_flags;
416 ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
422 vm_flags_reset(vma, vm_flags);
423 start = vma
513 __kvmppc_svm_page_out(struct vm_area_struct *vma, unsigned long start, unsigned long end, unsigned long page_shift, struct kvm *kvm, unsigned long gpa, struct page *fault_page) argument
586 kvmppc_svm_page_out(struct vm_area_struct *vma, unsigned long start, unsigned long end, unsigned long page_shift, struct kvm *kvm, unsigned long gpa, struct page *fault_page) argument
616 struct vm_area_struct *vma = NULL; local
740 kvmppc_svm_page_in(struct vm_area_struct *vma, unsigned long start, unsigned long end, unsigned long gpa, struct kvm *kvm, unsigned long page_shift, bool pagein) argument
798 struct vm_area_struct *vma; local
941 struct vm_area_struct *vma; local
1051 struct vm_area_struct *vma; local
[all...]
/linux-master/arch/mips/kernel/
smp.c:571 struct vm_area_struct *vma; member in struct:flush_tlb_data
580 local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
583 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) argument
585 struct mm_struct *mm = vma->vm_mm;
607 .vma = vma,
613 local_flush_tlb_range(vma, start, end);
616 int exec = vma->vm_flags & VM_EXEC;
628 local_flush_tlb_range(vma, start, end);
654 local_flush_tlb_page(fd->vma, f
657 flush_tlb_page(struct vm_area_struct *vma, unsigned long page) argument
[all...]
/linux-master/arch/mips/include/asm/
cacheflush.h:22 * - flush_cache_range(vma, start, end) flushes a range of pages
50 extern void (*flush_cache_range)(struct vm_area_struct *vma,
52 extern void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, unsigned long pfn);
78 static inline void flush_anon_page(struct vm_area_struct *vma, argument
110 extern void copy_to_user_page(struct vm_area_struct *vma,
114 extern void copy_from_user_page(struct vm_area_struct *vma,
/linux-master/drivers/gpu/drm/i915/display/
intel_fbdev_fb.c:70 struct drm_i915_gem_object *obj, struct i915_vma *vma)
90 (unsigned long)(ggtt->gmadr.start + i915_ggtt_offset(vma));
91 info->fix.smem_len = vma->size;
95 ret = i915_gem_object_lock(vma->obj, &ww);
100 vaddr = i915_vma_pin_iomap(vma);
69 intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info, struct drm_i915_gem_object *obj, struct i915_vma *vma) argument
/linux-master/arch/um/kernel/
trap.c:28 struct vm_area_struct *vma; local
47 vma = find_vma(mm, address);
48 if (!vma)
50 if (vma->vm_start <= address)
52 if (!(vma->vm_flags & VM_GROWSDOWN))
56 vma = expand_stack(mm, address);
57 if (!vma)
63 if (!(vma->vm_flags & VM_WRITE))
68 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
75 fault = handle_mm_fault(vma, addres
[all...]
/linux-master/drivers/char/
bsr.c:114 static int bsr_mmap(struct file *filp, struct vm_area_struct *vma) argument
116 unsigned long size = vma->vm_end - vma->vm_start;
120 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
124 ret = remap_4k_pfn(vma, vma->vm_start, dev->bsr_addr >> 12,
125 vma->vm_page_prot);
127 ret = io_remap_pfn_range(vma, vma
[all...]
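bsr_mmap() above follows the common character-device pattern: size-check the request, mark the mapping uncached, then remap the physical range into the vma. A hedged sketch (MY_PHYS_BASE is a placeholder):

	static int my_mmap(struct file *filp, struct vm_area_struct *vma)
	{
		unsigned long size = vma->vm_end - vma->vm_start;

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		return io_remap_pfn_range(vma, vma->vm_start,
					  MY_PHYS_BASE >> PAGE_SHIFT, /* placeholder */
					  size, vma->vm_page_prot);
	}
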
/linux-master/drivers/gpu/drm/amd/amdkfd/
kfd_doorbell.c:107 struct vm_area_struct *vma)
116 if (vma->vm_end - vma->vm_start != kfd_doorbell_process_slice(dev->kfd))
127 vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
130 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
137 (unsigned long long) vma->vm_start, address, vma->vm_flags,
141 return io_remap_pfn_range(vma,
142 vma
106 kfd_doorbell_mmap(struct kfd_node *dev, struct kfd_process *process, struct vm_area_struct *vma) argument
[all...]
/linux-master/arch/powerpc/mm/book3s64/
pkeys.c:379 static inline bool vma_is_pkey_exec_only(struct vm_area_struct *vma) argument
382 if ((vma->vm_flags & VM_ACCESS_FLAGS) != VM_EXEC)
385 return (vma_pkey(vma) == vma->vm_mm->context.execute_only_pkey);
391 int __arch_override_mprotect_pkey(struct vm_area_struct *vma, int prot, argument
398 if (vma_is_pkey_exec_only(vma) && (prot != PROT_EXEC))
406 pkey = execute_only_pkey(vma->vm_mm);
412 return vma_pkey(vma);
447 bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write, argument
453 * Do not enforce our key-permissions on a foreign vma
[all...]
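vma_pkey() and __arch_override_mprotect_pkey() above sit behind the userspace protection-key API. A minimal userspace sketch of attaching a key to an existing mapping 'addr'/'len' (placeholders):

	#define _GNU_SOURCE
	#include <sys/mman.h>

	int pkey = pkey_alloc(0, PKEY_DISABLE_WRITE);
	if (pkey >= 0)
		pkey_mprotect(addr, len, PROT_READ | PROT_WRITE, pkey);
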
/linux-master/arch/mips/mm/
tlb-r3k.c:67 void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, argument
71 struct mm_struct *mm = vma->vm_mm;
147 void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) argument
152 if (cpu_context(cpu, vma->vm_mm) != 0) {
157 printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page);
159 newpid = cpu_context(cpu, vma->vm_mm) & asid_mask;
179 void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte) argument
188 if (current->active_mm != vma->vm_mm)
194 if ((pid != (cpu_context(cpu, vma->vm_mm) & asid_mask)) || (cpu_context(cpu, vma
[all...]
/linux-master/arch/nios2/mm/
fault.c:46 struct vm_area_struct *vma = NULL; local
90 vma = lock_mm_and_find_vma(mm, address, regs);
91 if (!vma)
105 if (!(vma->vm_flags & VM_EXEC))
109 if (!(vma->vm_flags & VM_READ))
113 if (!(vma->vm_flags & VM_WRITE))
124 fault = handle_mm_fault(vma, address, flags, regs);
/linux-master/arch/alpha/mm/
fault.c:88 struct vm_area_struct * vma; local
122 vma = lock_mm_and_find_vma(mm, address, regs);
123 if (!vma)
130 if (!(vma->vm_flags & VM_EXEC))
134 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
137 if (!(vma->vm_flags & VM_WRITE))
145 fault = handle_mm_fault(vma, address, flags, regs);
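The um, nios2, and alpha fault handlers on this page share one shape: find the vma, check that its vm_flags permit the faulting access, then call handle_mm_fault(). That check, distilled into one helper (illustrative, not from the source):

	static bool fault_access_ok(struct vm_area_struct *vma,
				    bool is_write, bool is_exec)
	{
		if (is_exec)
			return vma->vm_flags & VM_EXEC;
		if (is_write)
			return vma->vm_flags & VM_WRITE;
		return vma->vm_flags & (VM_READ | VM_EXEC);
	}
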
/linux-master/drivers/uio/
uio.c:658 static int uio_find_mem_index(struct vm_area_struct *vma) argument
660 struct uio_device *idev = vma->vm_private_data;
662 if (vma->vm_pgoff < MAX_UIO_MAPS) {
663 if (idev->info->mem[vma->vm_pgoff].size == 0)
665 return (int)vma->vm_pgoff;
672 struct uio_device *idev = vmf->vma->vm_private_data;
685 mi = uio_find_mem_index(vmf->vma);
715 static int uio_mmap_logical(struct vm_area_struct *vma) argument
717 vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
718 vma
728 uio_mmap_physical(struct vm_area_struct *vma) argument
763 uio_mmap_dma_coherent(struct vm_area_struct *vma) argument
806 uio_mmap(struct file *filep, struct vm_area_struct *vma) argument
[all...]
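uio_find_mem_index() above encodes the UIO convention that vm_pgoff selects the memory region being mapped, so userspace maps region N by passing an offset of N pages. A minimal sketch:

	#include <fcntl.h>
	#include <sys/mman.h>
	#include <unistd.h>

	size_t map_len = 4096;	/* real length comes from the uio sysfs maps */
	int fd = open("/dev/uio0", O_RDWR);
	/* Map region 1: offset = region index * page size. */
	void *base = mmap(NULL, map_len, PROT_READ | PROT_WRITE,
			  MAP_SHARED, fd, 1 * sysconf(_SC_PAGESIZE));
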

