Searched refs:vma (Results 51 - 75 of 1003) sorted by relevance


/linux-master/fs/proc/
task_nommu.c
24 struct vm_area_struct *vma; local
29 for_each_vma(vmi, vma) {
30 bytes += kobjsize(vma);
32 region = vma->vm_region;
37 size = vma->vm_end - vma->vm_start;
41 is_nommu_shared_mapping(vma->vm_flags)) {
46 slack = region->vm_end - vma->vm_end;
84 struct vm_area_struct *vma; local
88 for_each_vma(vmi, vma)
99 struct vm_area_struct *vma; local
127 nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma) argument
181 struct vm_area_struct *vma = vma_next(&priv->iter); local
[all...]
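
Every helper in this file follows the same walk: set up a VMA iterator over the task's mm and visit each VMA in address order. A minimal sketch of that pattern, assuming the caller already holds mmap_read_lock(mm) as the task_nommu.c helpers do; the function name is illustrative, not from the file:

        #include <linux/mm.h>

        /* Hypothetical helper: total virtual span of every VMA in @mm.
         * Caller is assumed to hold mmap_read_lock(mm). */
        static unsigned long example_total_vm_bytes(struct mm_struct *mm)
        {
                VMA_ITERATOR(vmi, mm, 0);
                struct vm_area_struct *vma;
                unsigned long bytes = 0;

                for_each_vma(vmi, vma)
                        bytes += vma->vm_end - vma->vm_start;

                return bytes;
        }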
/linux-master/drivers/gpu/drm/i915/selftests/
i915_gem_gtt.c
397 struct i915_vma *vma; local
399 vma = i915_vma_instance(obj, vm, NULL);
400 if (!IS_ERR(vma))
401 ignored = i915_vma_unbind_unlocked(vma);
420 struct i915_vma *vma; local
461 vma = i915_vma_instance(obj, vm, NULL);
462 if (IS_ERR(vma))
471 err = i915_vma_pin(vma, 0, 0, offset | flags);
478 if (!drm_mm_node_allocated(&vma->node) ||
479 i915_vma_misplaced(vma,
654 struct i915_vma *vma; local
721 struct i915_vma *vma; local
810 struct i915_vma *vma; local
917 struct i915_vma *vma; local
1021 struct i915_vma *vma; local
1083 struct i915_vma *vma; local
1436 track_vma_bind(struct i915_vma *vma) argument
1504 reserve_gtt_with_resource(struct i915_vma *vma, u64 offset) argument
1549 struct i915_vma *vma; local
1594 struct i915_vma *vma; local
1638 struct i915_vma *vma; local
1685 insert_gtt_with_resource(struct i915_vma *vma) argument
1771 struct i915_vma *vma; local
1812 struct i915_vma *vma; local
1831 struct i915_vma *vma; local
1870 struct i915_vma *vma; local
[all...]
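
The selftest repeats one sequence throughout: look up the VMA binding an object into an address space with i915_vma_instance(), pin it at a chosen GTT offset, then drop the binding. A hedged sketch of that sequence under the same unlocked-selftest assumptions as above; the wrapper name is illustrative:

        static int example_pin_at(struct drm_i915_gem_object *obj,
                                  struct i915_address_space *vm,
                                  u64 offset, unsigned int flags)
        {
                struct i915_vma *vma;
                int err;

                vma = i915_vma_instance(obj, vm, NULL);
                if (IS_ERR(vma))
                        return PTR_ERR(vma);

                err = i915_vma_pin(vma, 0, 0, offset | flags);
                if (err)
                        return err;

                i915_vma_unpin(vma);
                return i915_vma_unbind_unlocked(vma);
        }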
i915_vma.c
37 static bool assert_vma(struct i915_vma *vma, argument
43 if (vma->vm != ctx->vm) {
48 if (vma->size != obj->base.size) {
50 vma->size, obj->base.size);
54 if (vma->gtt_view.type != I915_GTT_VIEW_NORMAL) {
56 vma->gtt_view.type);
68 struct i915_vma *vma; local
71 vma = i915_vma_instance(obj, vm, view);
72 if (IS_ERR(vma))
73 return vma;
119 struct i915_vma *vma; local
227 assert_pin_valid(const struct i915_vma *vma, const struct pin_mode *mode, int result) argument
241 assert_pin_enospc(const struct i915_vma *vma, const struct pin_mode *mode, int result) argument
249 assert_pin_einval(const struct i915_vma *vma, const struct pin_mode *mode, int result) argument
312 struct i915_vma *vma; local
616 struct i915_vma *vma; local
713 assert_partial(struct drm_i915_gem_object *obj, struct i915_vma *vma, unsigned long offset, unsigned long size) argument
743 assert_pin(struct i915_vma *vma, struct i915_gtt_view *view, u64 size, const char *name) argument
804 struct i915_vma *vma; local
1004 struct i915_vma *vma; local
[all...]
/linux-master/drivers/xen/xenfs/
xenstored.c
34 static int xsd_kva_mmap(struct file *file, struct vm_area_struct *vma) argument
36 size_t size = vma->vm_end - vma->vm_start;
38 if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0))
41 if (remap_pfn_range(vma, vma->vm_start,
43 size, vma->vm_page_prot))
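
xsd_kva_mmap() is the canonical single-page remap_pfn_range() handler: reject any mapping larger than one page or at a nonzero offset, then map the pfn straight into the VMA. A hedged sketch of the same shape for a driver exposing one kernel page; shared_page_pfn is a hypothetical placeholder for the page frame number being exported:

        static int example_mmap(struct file *file, struct vm_area_struct *vma)
        {
                size_t size = vma->vm_end - vma->vm_start;

                if (size > PAGE_SIZE || vma->vm_pgoff != 0)
                        return -EINVAL;

                return remap_pfn_range(vma, vma->vm_start, shared_page_pfn,
                                       size, vma->vm_page_prot);
        }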
/linux-master/arch/powerpc/include/asm/book3s/32/
tlbflush.h
12 void hash__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
55 static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr) argument
58 hash__flush_tlb_page(vma, vmaddr);
64 flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) argument
66 flush_range(vma->vm_mm, start, end);
74 static inline void local_flush_tlb_page(struct vm_area_struct *vma, argument
77 flush_tlb_page(vma, vmaddr);
/linux-master/mm/
memory.c
363 struct vm_area_struct *vma, unsigned long floor,
367 unsigned long addr = vma->vm_start;
379 * Hide vma from rmap and truncate_pagecache before freeing
383 vma_start_write(vma);
384 unlink_anon_vmas(vma);
385 unlink_file_vma(vma);
387 if (is_vm_hugetlb_page(vma)) {
388 hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
394 while (next && next->vm_start <= vma->vm_end + PMD_SIZE
396 vma
362 free_pgtables(struct mmu_gather *tlb, struct ma_state *mas, struct vm_area_struct *vma, unsigned long floor, unsigned long ceiling, bool mm_wr_locked) argument
489 print_bad_pte(struct vm_area_struct *vma, unsigned long addr, pte_t pte, struct page *page) argument
582 vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_t pte) argument
645 vm_normal_folio(struct vm_area_struct *vma, unsigned long addr, pte_t pte) argument
656 vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t pmd) argument
696 vm_normal_folio_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t pmd) argument
707 restore_exclusive_pte(struct vm_area_struct *vma, struct page *page, unsigned long address, pte_t *ptep) argument
757 try_restore_exclusive_pte(pte_t *src_pte, struct vm_area_struct *vma, unsigned long addr) argument
1040 folio_prealloc(struct mm_struct *src_mm, struct vm_area_struct *vma, unsigned long addr, bool need_zero) argument
1448 zap_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr, pte_t *pte, int nr, struct zap_details *details, pte_t pteval) argument
1469 zap_present_folio_ptes(struct mmu_gather *tlb, struct vm_area_struct *vma, struct folio *folio, struct page *page, pte_t *pte, pte_t ptent, unsigned int nr, unsigned long addr, struct zap_details *details, int *rss, bool *force_flush, bool *force_break) argument
1521 zap_present_ptes(struct mmu_gather *tlb, struct vm_area_struct *vma, pte_t *pte, pte_t ptent, unsigned int max_nr, unsigned long addr, struct zap_details *details, int *rss, bool *force_flush, bool *force_break) argument
1568 zap_pte_range(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long end, struct zap_details *details) argument
1688 zap_pmd_range(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud, unsigned long addr, unsigned long end, struct zap_details *details) argument
1730 zap_pud_range(struct mmu_gather *tlb, struct vm_area_struct *vma, p4d_t *p4d, unsigned long addr, unsigned long end, struct zap_details *details) argument
1759 zap_p4d_range(struct mmu_gather *tlb, struct vm_area_struct *vma, pgd_t *pgd, unsigned long addr, unsigned long end, struct zap_details *details) argument
1778 unmap_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long addr, unsigned long end, struct zap_details *details) argument
1799 unmap_single_vma(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long start_addr, unsigned long end_addr, struct zap_details *details, bool mm_wr_locked) argument
1864 unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas, struct vm_area_struct *vma, unsigned long start_addr, unsigned long end_addr, unsigned long tree_end, bool mm_wr_locked) argument
1900 zap_page_range_single(struct vm_area_struct *vma, unsigned long address, unsigned long size, struct zap_details *details) argument
1935 zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, unsigned long size) argument
1989 insert_page_into_pte_locked(struct vm_area_struct *vma, pte_t *pte, unsigned long addr, struct page *page, pgprot_t prot) argument
2011 insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *page, pgprot_t prot) argument
2031 insert_page_in_batch_locked(struct vm_area_struct *vma, pte_t *pte, unsigned long addr, struct page *page, pgprot_t prot) argument
2047 insert_pages(struct vm_area_struct *vma, unsigned long addr, struct page **pages, unsigned long *num, pgprot_t prot) argument
2120 vm_insert_pages(struct vm_area_struct *vma, unsigned long addr, struct page **pages, unsigned long *num) argument
2166 vm_insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *page) argument
2193 __vm_map_pages(struct vm_area_struct *vma, struct page **pages, unsigned long num, unsigned long offset) argument
2236 vm_map_pages(struct vm_area_struct *vma, struct page **pages, unsigned long num) argument
2256 vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages, unsigned long num) argument
2263 insert_pfn(struct vm_area_struct *vma, unsigned long addr, pfn_t pfn, pgprot_t prot, bool mkwrite) argument
2350 vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn, pgprot_t pgprot) argument
2398 vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn) argument
2405 vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn) argument
2419 __vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, pfn_t pfn, bool mkwrite) argument
2465 vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr, pfn_t pfn) argument
2477 vmf_insert_mixed_mkwrite(struct vm_area_struct *vma, unsigned long addr, pfn_t pfn) argument
2586 remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn, unsigned long size, pgprot_t prot) argument
2651 remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn, unsigned long size, pgprot_t prot) argument
2682 vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len) argument
2966 struct vm_area_struct *vma = vmf->vma; local
3060 __get_fault_gfp_mask(struct vm_area_struct *vma) argument
3115 struct vm_area_struct *vma = vmf->vma; local
3169 struct vm_area_struct *vma = vmf->vma; variable in typeref:struct:vm_area_struct
3201 struct vm_area_struct *vma = vmf->vma; local
3211 struct vm_area_struct *vma = vmf->vma; local
3244 struct vm_area_struct *vma = vmf->vma; local
3440 struct vm_area_struct *vma = vmf->vma; local
3463 struct vm_area_struct *vma = vmf->vma; variable in typeref:struct:vm_area_struct
3500 wp_can_reuse_anon_folio(struct folio *folio, struct vm_area_struct *vma) argument
3574 struct vm_area_struct *vma = vmf->vma; variable in typeref:struct:vm_area_struct
3663 unmap_mapping_range_vma(struct vm_area_struct *vma, unsigned long start_addr, unsigned long end_addr, struct zap_details *details) argument
3675 struct vm_area_struct *vma; local
3797 struct vm_area_struct *vma = vmf->vma; local
3836 should_try_to_free_swap(struct folio *folio, struct vm_area_struct *vma, unsigned int fault_flags) argument
3932 struct vm_area_struct *vma = vmf->vma; local
4308 struct vm_area_struct *vma = vmf->vma; local
4386 struct vm_area_struct *vma = vmf->vma; local
4506 struct vm_area_struct *vma = vmf->vma; local
4563 struct vm_area_struct *vma = vmf->vma; local
4577 struct vm_area_struct *vma = vmf->vma; local
4656 struct vm_area_struct *vma = vmf->vma; local
4715 struct vm_area_struct *vma = vmf->vma; local
4908 struct vm_area_struct *vma = vmf->vma; local
4946 struct vm_area_struct *vma = vmf->vma; local
4996 struct vm_area_struct *vma = vmf->vma; local
5038 numa_migrate_prep(struct folio *folio, struct vm_area_struct *vma, unsigned long addr, int page_nid, int *flags) argument
5057 struct vm_area_struct *vma = vmf->vma; local
5173 struct vm_area_struct *vma = vmf->vma; local
5184 struct vm_area_struct *vma = vmf->vma; local
5217 struct vm_area_struct *vma = vmf->vma; local
5231 struct vm_area_struct *vma = vmf->vma; local
5350 __handle_mm_fault(struct vm_area_struct *vma, unsigned long address, unsigned int flags) argument
5513 lru_gen_enter_fault(struct vm_area_struct *vma) argument
5524 lru_gen_enter_fault(struct vm_area_struct *vma) argument
5533 sanitize_fault_flags(struct vm_area_struct *vma, unsigned int *flags) argument
5574 handle_mm_fault(struct vm_area_struct *vma, unsigned long address, unsigned int flags, struct pt_regs *regs) argument
5691 struct vm_area_struct *vma; local
5754 struct vm_area_struct *vma; local
5941 follow_pfn(struct vm_area_struct *vma, unsigned long address, unsigned long *pfn) argument
5961 follow_phys(struct vm_area_struct *vma, unsigned long address, unsigned int flags, unsigned long *prot, resource_size_t *phys) argument
6005 generic_access_phys(struct vm_area_struct *vma, unsigned long addr, void *buf, int len, int write) argument
6082 struct vm_area_struct *vma = NULL; local
6185 struct vm_area_struct *vma; local
6324 copy_user_gigantic_page(struct folio *dst, struct folio *src, unsigned long addr, struct vm_area_struct *vma, unsigned int pages_per_huge_page) argument
6350 struct vm_area_struct *vma; member in struct:copy_subpage_arg
6366 copy_user_large_folio(struct folio *dst, struct folio *src, unsigned long addr_hint, struct vm_area_struct *vma) argument
[all...]
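
Among the exports listed above, vm_insert_page() is the one drivers most often reach for: it installs a single kernel-allocated page into a user VMA with full rmap accounting, unlike the pfn-based remap helpers. A hedged usage sketch; example_priv and its page field are hypothetical:

        static int example_mmap_one_page(struct file *file,
                                         struct vm_area_struct *vma)
        {
                struct example_priv *priv = file->private_data;

                if (vma->vm_end - vma->vm_start != PAGE_SIZE)
                        return -EINVAL;

                /* priv->page was allocated earlier, e.g. with
                 * alloc_page(GFP_KERNEL), and stays owned by the driver. */
                return vm_insert_page(vma, vma->vm_start, priv->page);
        }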
/linux-master/include/linux/
huge_mm.h
17 struct vm_area_struct *vma);
28 bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
30 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd,
32 int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud,
34 bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
36 int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
87 #define thp_vma_allowable_order(vma, vm_flags, smaps, in_pf, enforce_sysfs, order) \
88 (!!thp_vma_allowable_orders(vma, vm_flags, smaps, in_pf, enforce_sysfs, BIT(order)))
142 * - For file vma, check if the linear page offset of vma i
151 thp_vma_suitable_order(struct vm_area_struct *vma, unsigned long addr, int order) argument
176 thp_vma_suitable_orders(struct vm_area_struct *vma, unsigned long addr, unsigned long orders) argument
199 file_thp_enabled(struct vm_area_struct *vma) argument
235 thp_vma_allowable_orders(struct vm_area_struct *vma, unsigned long vm_flags, bool smaps, bool in_pf, bool enforce_sysfs, unsigned long orders) argument
319 pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma) argument
327 pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma) argument
394 thp_vma_suitable_order(struct vm_area_struct *vma, unsigned long addr, int order) argument
400 thp_vma_suitable_orders(struct vm_area_struct *vma, unsigned long addr, unsigned long orders) argument
406 thp_vma_allowable_orders(struct vm_area_struct *vma, unsigned long vm_flags, bool smaps, bool in_pf, bool enforce_sysfs, unsigned long orders) argument
439 __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long address, bool freeze, struct folio *folio) argument
441 split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address, bool freeze, struct folio *folio) argument
447 hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags, int advice) argument
453 madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end) argument
460 vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start, unsigned long end, long adjust_next) argument
470 pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma) argument
475 pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma) argument
506 follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap) argument
512 follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr, pud_t *pud, int flags, struct dev_pagemap **pgmap) argument
[all...]
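
thp_vma_allowable_order() above is the single-order convenience wrapper around thp_vma_allowable_orders(): it converts the order into a one-bit mask and tests it. A hedged sketch of how a caller might ask whether a PMD-sized THP is permitted for a VMA, with the boolean arguments in the order the macro names them (smaps, in_pf, enforce_sysfs):

        /* In a page-fault path: not smaps, in a fault, honour sysfs policy. */
        bool pmd_thp_ok = thp_vma_allowable_order(vma, vma->vm_flags,
                                                  false, true, true,
                                                  HPAGE_PMD_ORDER);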
pkeys.h
14 #define arch_override_mprotect_pkey(vma, prot, pkey) (0)
18 static inline int vma_pkey(struct vm_area_struct *vma) argument
/linux-master/arch/alpha/include/asm/
tlbflush.h
39 struct vm_area_struct *vma,
43 if (vma->vm_flags & VM_EXEC) {
52 struct vm_area_struct *vma,
55 if (vma->vm_flags & VM_EXEC)
117 flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) argument
119 struct mm_struct *mm = vma->vm_mm;
122 flush_tlb_current_page(mm, vma, addr);
130 flush_tlb_range(struct vm_area_struct *vma, unsigned long start, argument
133 flush_tlb_mm(vma->vm_mm);
38 ev4_flush_tlb_current_page(struct mm_struct * mm, struct vm_area_struct *vma, unsigned long addr) argument
51 ev5_flush_tlb_current_page(struct mm_struct * mm, struct vm_area_struct *vma, unsigned long addr) argument
/linux-master/drivers/gpu/drm/ttm/
ttm_bo_vm.c
62 mmap_read_unlock(vmf->vma->vm_mm);
134 mmap_read_unlock(vmf->vma->vm_mm);
185 struct vm_area_struct *vma = vmf->vma; local
186 struct ttm_buffer_object *bo = vma->vm_private_data;
210 page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
211 vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node);
212 page_last = vma_pages(vma) + vma->vm_pgoff -
260 * the value of @vma
293 struct vm_area_struct *vma = vmf->vma; local
323 struct vm_area_struct *vma = vmf->vma; local
350 ttm_bo_vm_open(struct vm_area_struct *vma) argument
360 ttm_bo_vm_close(struct vm_area_struct *vma) argument
408 ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr, void *buf, int len, int write) argument
459 ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo) argument
[all...]
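
The fault handler above locates the backing page with pure offset arithmetic: the page distance of the faulting address from the VMA start, plus the pgoff the mapping was created with, minus where the object sits in the DRM mmap offset space. Restated on its own, with the same names the snippet uses:

        /* Inside the fault handler, with @address the faulting user address: */
        pgoff_t page_offset = ((address - vma->vm_start) >> PAGE_SHIFT)
                            + vma->vm_pgoff
                            - drm_vma_node_start(&bo->base.vma_node);

        /* page_last bounds the loop the same way, computed from
         * vma_pages(vma) rather than the faulting address. */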
/linux-master/arch/arm64/include/asm/
hugetlb.h
33 extern int huge_ptep_set_access_flags(struct vm_area_struct *vma,
43 extern pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
54 extern pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
58 extern void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
65 static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma, argument
69 unsigned long stride = huge_page_size(hstate_vma(vma));
72 __flush_tlb_range(vma, start, end, stride, false, 2);
74 __flush_tlb_range(vma, start, end, stride, false, 1);
76 __flush_tlb_range(vma, start, end, PAGE_SIZE, false, 0);
/linux-master/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
vmm.c
802 struct nvkm_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL); local
803 if (vma) {
804 vma->addr = addr;
805 vma->size = size;
806 vma->page = NVKM_VMA_PAGE_NONE;
807 vma->refd = NVKM_VMA_PAGE_NONE;
809 return vma;
813 nvkm_vma_tail(struct nvkm_vma *vma, u64 tail) argument
817 BUG_ON(vma
836 nvkm_vmm_free_remove(struct nvkm_vmm *vmm, struct nvkm_vma *vma) argument
842 nvkm_vmm_free_delete(struct nvkm_vmm *vmm, struct nvkm_vma *vma) argument
850 nvkm_vmm_free_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma) argument
878 nvkm_vmm_node_remove(struct nvkm_vmm *vmm, struct nvkm_vma *vma) argument
884 nvkm_vmm_node_delete(struct nvkm_vmm *vmm, struct nvkm_vma *vma) argument
892 nvkm_vmm_node_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma) argument
918 struct nvkm_vma *vma = rb_entry(node, typeof(*vma), tree); local
934 nvkm_vmm_node_merge(struct nvkm_vmm *vmm, struct nvkm_vma *prev, struct nvkm_vma *vma, struct nvkm_vma *next, u64 size) argument
976 nvkm_vmm_node_split(struct nvkm_vmm *vmm, struct nvkm_vma *vma, u64 addr, u64 size) argument
1003 nvkm_vma_dump(struct nvkm_vma *vma) argument
1021 struct nvkm_vma *vma; local
1030 struct nvkm_vma *vma; local
1044 struct nvkm_vma *vma = rb_entry(node, typeof(*vma), tree); local
1078 struct nvkm_vma *vma; local
1098 struct nvkm_vma *vma; local
1215 nvkm_vmm_pfn_split_merge(struct nvkm_vmm *vmm, struct nvkm_vma *vma, u64 addr, u64 size, u8 page, bool map) argument
1240 struct nvkm_vma *vma = nvkm_vmm_node_search(vmm, addr); local
1277 struct nvkm_vma *vma, *tmp; local
1397 nvkm_vmm_unmap_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma) argument
1414 nvkm_vmm_unmap_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma, bool pfn) argument
1429 nvkm_vmm_unmap(struct nvkm_vmm *vmm, struct nvkm_vma *vma) argument
1439 nvkm_vmm_map_valid(struct nvkm_vmm *vmm, struct nvkm_vma *vma, void *argv, u32 argc, struct nvkm_vmm_map *map) argument
1475 nvkm_vmm_map_choose(struct nvkm_vmm *vmm, struct nvkm_vma *vma, void *argv, u32 argc, struct nvkm_vmm_map *map) argument
1487 nvkm_vmm_map_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma, void *argv, u32 argc, struct nvkm_vmm_map *map) argument
1575 nvkm_vmm_map(struct nvkm_vmm *vmm, struct nvkm_vma *vma, void *argv, u32 argc, struct nvkm_vmm_map *map) argument
1592 nvkm_vmm_put_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma) argument
1611 nvkm_vmm_put_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma) argument
1695 struct nvkm_vma *vma = *pvma; local
1710 struct nvkm_vma *vma = NULL, *tmp; local
[all...]
/linux-master/drivers/infiniband/sw/rxe/
rxe_mmap.c
37 static void rxe_vma_open(struct vm_area_struct *vma) argument
39 struct rxe_mmap_info *ip = vma->vm_private_data;
44 static void rxe_vma_close(struct vm_area_struct *vma) argument
46 struct rxe_mmap_info *ip = vma->vm_private_data;
59 * @vma: the VMA to be initialized
62 int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) argument
65 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
66 unsigned long size = vma->vm_end - vma->vm_start;
99 ret = remap_vmalloc_range(vma, i
[all...]
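
The truncated call above maps a vmalloc()ed queue buffer into userspace with remap_vmalloc_range(), which remaps the whole vmalloc area page by page. A hedged sketch of that API on a hypothetical buffer:

        /* @buf must come from vmalloc_user() (or be flagged VM_USERMAP);
         * the final argument is the starting offset into @buf, in pages. */
        static int example_mmap_vmalloc(struct vm_area_struct *vma, void *buf)
        {
                return remap_vmalloc_range(vma, buf, 0);
        }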
/linux-master/drivers/gpu/drm/i915/gt/
intel_ring.c
37 struct i915_vma *vma = ring->vma; local
46 flags = PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
48 if (i915_gem_object_is_stolen(vma->obj))
53 ret = i915_ggtt_pin(vma, ww, 0, flags);
57 if (i915_vma_is_map_and_fenceable(vma) && !HAS_LLC(vma->vm->i915)) {
58 addr = (void __force *)i915_vma_pin_iomap(vma);
60 int type = intel_gt_coherent_map_type(vma->vm->gt, vma
96 struct i915_vma *vma = ring->vma; local
116 struct i915_vma *vma; local
150 struct i915_vma *vma; local
[all...]
/linux-master/arch/openrisc/include/asm/
tlbflush.h
27 * - flush_tlb_page(vma, vmaddr) flushes one page
28 * - flush_tlb_range(vma, start, end) flushes a range of pages
32 extern void local_flush_tlb_page(struct vm_area_struct *vma,
34 extern void local_flush_tlb_range(struct vm_area_struct *vma,
46 extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
47 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
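
The comment block above states the contract: flush_tlb_page() invalidates the translation for one page of a VMA, flush_tlb_range() a span of pages. A hedged sketch of the usual caller shape, after page table entries for [start, end) have been changed; the function name is illustrative:

        static void example_after_pte_update(struct vm_area_struct *vma,
                                             unsigned long start,
                                             unsigned long end)
        {
                /* ... PTEs for [start, end) were just modified ... */
                flush_tlb_range(vma, start, end);
                /* a single page at addr would instead use:
                 * flush_tlb_page(vma, addr); */
        }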
/linux-master/arch/parisc/include/asm/
tlbflush.h
19 #define flush_tlb_range(vma, start, end) \
20 __flush_tlb_range((vma)->vm_mm->context.space_id, start, end)
64 static inline void flush_tlb_page(struct vm_area_struct *vma, argument
67 purge_tlb_entries(vma->vm_mm, addr);
/linux-master/drivers/gpu/drm/i915/gem/selftests/
igt_gem_utils.c
42 igt_emit_store_dw(struct i915_vma *vma, argument
48 const int ver = GRAPHICS_VER(vma->vm->i915);
55 obj = i915_gem_object_create_internal(vma->vm->i915, size);
65 GEM_BUG_ON(offset + (count - 1) * PAGE_SIZE > i915_vma_size(vma));
66 offset += i915_vma_offset(vma);
92 intel_gt_chipset_flush(vma->vm->gt);
94 vma = i915_vma_instance(obj, vma->vm, NULL);
95 if (IS_ERR(vma)) {
96 err = PTR_ERR(vma);
111 igt_gpu_fill_dw(struct intel_context *ce, struct i915_vma *vma, u64 offset, unsigned long count, u32 val) argument
[all...]
/linux-master/arch/x86/um/
mem_32.c
44 struct vm_area_struct *vma = get_gate_vma(mm); local
46 if (!vma)
49 return (addr >= vma->vm_start) && (addr < vma->vm_end);
/linux-master/mm/damon/
ops-common.h
12 void damon_ptep_mkold(pte_t *pte, struct vm_area_struct *vma, unsigned long addr);
13 void damon_pmdp_mkold(pmd_t *pmd, struct vm_area_struct *vma, unsigned long addr);
/linux-master/arch/hexagon/mm/
vm_tlb.c
26 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, argument
29 struct mm_struct *mm = vma->vm_mm;
65 * Flush TLB state associated with a page of a vma.
67 void flush_tlb_page(struct vm_area_struct *vma, unsigned long vaddr) argument
69 struct mm_struct *mm = vma->vm_mm;
77 * Like flush range, but without the check on the vma->vm_mm.
/linux-master/drivers/gpu/drm/i915/
i915_mm.c
86 * @vma: user vma to map to
94 int remap_io_mapping(struct vm_area_struct *vma, argument
101 GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);
104 r.mm = vma->vm_mm;
107 (pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK));
111 zap_vma_ptes(vma, addr, (r.pfn - pfn) << PAGE_SHIFT);
121 * @vma: user vma to map to
129 int remap_io_sg(struct vm_area_struct *vma, argument
[all...]
/linux-master/arch/x86/xen/
mmu.h
20 pte_t xen_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep);
21 void xen_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
/linux-master/arch/powerpc/include/asm/book3s/64/
hugetlb.h
12 void radix__flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
13 void radix__local_flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
15 extern void radix__huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
56 extern pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
60 extern void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
86 static inline void flush_hugetlb_page(struct vm_area_struct *vma, argument
90 return radix__flush_hugetlb_page(vma, vmaddr);
106 void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
/linux-master/drivers/media/common/videobuf2/
videobuf2-memops.c
84 * vb2_common_vm_open() - increase refcount of the vma
85 * @vma: virtual memory region for the mapping
87 * This function adds another user to the provided vma. It expects
88 * struct vb2_vmarea_handler pointer in vma->vm_private_data.
90 static void vb2_common_vm_open(struct vm_area_struct *vma) argument
92 struct vb2_vmarea_handler *h = vma->vm_private_data;
94 pr_debug("%s: %p, refcount: %d, vma: %08lx-%08lx\n",
95 __func__, h, refcount_read(h->refcount), vma->vm_start,
96 vma->vm_end);
102 * vb2_common_vm_close() - decrease refcount of the vma
108 vb2_common_vm_close(struct vm_area_struct *vma) argument
[all...]
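
The kerneldoc above describes vb2's mapping refcount scheme: .open runs whenever the kernel duplicates the VMA (fork, split), .close on every unmap, so the buffer outlives any individual mapping. A hedged sketch of that vm_operations pairing using the vb2_vmarea_handler fields the comments name; the function names are illustrative, not the vb2 internals:

        static void example_vm_open(struct vm_area_struct *vma)
        {
                struct vb2_vmarea_handler *h = vma->vm_private_data;

                refcount_inc(h->refcount);      /* one more user of this mapping */
        }

        static void example_vm_close(struct vm_area_struct *vma)
        {
                struct vb2_vmarea_handler *h = vma->vm_private_data;

                h->put(h->arg);                 /* drop this mapping's reference */
        }

        static const struct vm_operations_struct example_vm_ops = {
                .open  = example_vm_open,
                .close = example_vm_close,
        };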
/linux-master/drivers/gpu/drm/vmwgfx/
vmwgfx_ttm_glue.c
70 int vmw_mmap(struct file *filp, struct vm_area_struct *vma) argument
85 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET_START))
88 ret = vmw_bo_vm_lookup(bdev, file_priv, vma->vm_pgoff, vma_pages(vma), &bo);
92 ret = ttm_bo_mmap_obj(vma, bo);
96 vma->vm_ops = &vmw_vm_ops;
99 if (!is_cow_mapping(vma->vm_flags))
100 vm_flags_mod(vma, VM_PFNMAP, VM_MIXEDMAP);
