/linux-master/arch/csky/abiv1/inc/abi/
  cacheflush.h
     6: #include <linux/mm.h>
    15: #define flush_cache_mm(mm) dcache_wbinv_all()
    17: #define flush_cache_dup_mm(mm) cache_wbinv_all()
    41: * if (current_mm != vma->mm) cache_wbinv_range(start, end) will be broken.
    50: #define flush_icache_mm_range(mm, start, end) cache_wbinv_range(start, end)
    51: #define flush_icache_deferred(mm) do {} while (0);
/linux-master/arch/x86/entry/vdso/
  vma.c
     7: #include <linux/mm.h>
    83: (unsigned long)current->mm->context.vdso;
    95: const struct vdso_image *image = current->mm->context.vdso_image;
    98: current->mm->context.vdso = (void __user *)new_vma->vm_start;
   113: struct mm_struct *mm = task->mm;  [local]
   115: VMA_ITERATOR(vmi, mm, 0);
   117: mmap_read_lock(mm);
   122: mmap_read_unlock(mm);
   225: struct mm_struct *mm ...  [local]
   279: struct mm_struct *mm = current->mm;  [local]
    [all ...]
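The vma.c hits above show the common pattern of walking a task's VMAs with the maple-tree VMA iterator while holding the mmap read lock. A minimal sketch of that pattern, not taken from vma.c itself; the helper name walk_task_vmas() is hypothetical, and it assumes task->mm is non-NULL (i.e. not a kernel thread):

#include <linux/mm.h>
#include <linux/mmap_lock.h>
#include <linux/printk.h>
#include <linux/sched.h>

static void walk_task_vmas(struct task_struct *task)
{
	struct mm_struct *mm = task->mm;	/* assumed non-NULL here */
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);		/* iterate from address 0 */

	mmap_read_lock(mm);
	for_each_vma(vmi, vma)
		pr_debug("vma %lx-%lx\n", vma->vm_start, vma->vm_end);
	mmap_read_unlock(mm);
}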
/linux-master/arch/s390/mm/
  hugetlbpage.c
    13: #include <linux/mm.h>
    16: #include <linux/sched/mm.h>
   122: static void clear_huge_pte_skeys(struct mm_struct *mm, unsigned long rste)  [argument]
   127: if (!mm_uses_skeys(mm) ||
   145: void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,  [argument]
   162: clear_huge_pte_skeys(mm, rste);
   166: void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,  [argument]
   169: __set_huge_pte_at(mm, addr, ptep, pte);
   177: pte_t huge_ptep_get_and_clear(struct mm_struct *mm,  [argument]
   185: pudp_xchg_direct(mm, add...
   191: huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long sz)  [argument]
   213: huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz)  [argument]
   309: struct mm_struct *mm = current->mm;  [local]
    [all ...]
  pgalloc.c
    11: #include <linux/mm.h>
    44: unsigned long *crst_table_alloc(struct mm_struct *mm)  [argument]
    56: void crst_table_free(struct mm_struct *mm, unsigned long *table)  [argument]
    63: struct mm_struct *mm = arg;  [local]
    66: if (current->active_mm == mm) {
    67: S390_lowcore.user_asce.val = mm->context.asce;
    73: int crst_table_upgrade(struct mm_struct *mm, unsigned long end)  [argument]
    76: unsigned long asce_limit = mm->context.asce_limit;
    85: p4d = crst_table_alloc(mm);
    91: pgd = crst_table_alloc(mm);
   138: page_table_alloc_pgste(struct mm_struct *mm)  [argument]
   160: page_table_alloc(struct mm_struct *mm)  [argument]
   187: page_table_free(struct mm_struct *mm, unsigned long *table)  [argument]
   215: pte_free_defer(struct mm_struct *mm, pgtable_t pgtable)  [argument]
    [all ...]
/linux-master/mm/
  page_table_check.c
     8: #include <linux/mm.h>
   152: void __page_table_check_pte_clear(struct mm_struct *mm, pte_t pte)  [argument]
   154: if (&init_mm == mm)
   163: void __page_table_check_pmd_clear(struct mm_struct *mm, pmd_t pmd)  [argument]
   165: if (&init_mm == mm)
   174: void __page_table_check_pud_clear(struct mm_struct *mm, pud_t pud)  [argument]
   176: if (&init_mm == mm)
   185: void __page_table_check_ptes_set(struct mm_struct *mm, pte_t *ptep, pte_t pte,  [argument]
   190: if (&init_mm == mm)
   194: __page_table_check_pte_clear(mm, ptep_ge...
   200: __page_table_check_pmd_set(struct mm_struct *mm, pmd_t *pmdp, pmd_t pmd)  [argument]
   213: __page_table_check_pud_set(struct mm_struct *mm, pud_t *pudp, pud_t pud)  [argument]
   226: __page_table_check_pte_clear_range(struct mm_struct *mm, unsigned long addr, pmd_t pmd)  [argument]
    [all ...]
  mm_slot.h
    10: * struct mm_slot - hash lookup from mm to mm_slot
    13: * @mm: the mm that this information is valid for
    18: struct mm_struct *mm;  [member in struct:mm_slot]
    41: if (_mm == tmp_slot->mm) { \
    51: _mm_slot->mm = _mm; \
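The mm_slot.h hits are fragments of the mm_slot_lookup()/mm_slot_insert() helpers, which key a hash table by the mm_struct pointer. A hedged sketch of how such a slot is typically consumed, modelled loosely on the ksm/khugepaged usage; struct my_slot, my_slots_hash and track_mm() are hypothetical names:

#include <linux/hashtable.h>
#include <linux/slab.h>
#include "mm_slot.h"

struct my_slot {
	struct mm_slot slot;	/* embeds the hash node and the mm pointer */
	int private_state;	/* whatever per-mm data the subsystem keeps */
};

static DEFINE_HASHTABLE(my_slots_hash, 10);

static struct my_slot *track_mm(struct mm_struct *mm)
{
	struct mm_slot *found;
	struct my_slot *ms;

	/* hash lookup keyed by the mm pointer */
	found = mm_slot_lookup(my_slots_hash, mm);
	if (found)
		return mm_slot_entry(found, struct my_slot, slot);

	ms = kzalloc(sizeof(*ms), GFP_KERNEL);
	if (!ms)
		return NULL;

	/* record the mm in the slot and add it to the hash */
	mm_slot_insert(my_slots_hash, mm, &ms->slot);
	return ms;
}

The real users embed struct mm_slot at the start of a larger per-mm tracking structure and recover it with mm_slot_entry(), exactly as container_of() would.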
/linux-master/arch/mips/include/asm/
  tlbflush.h
     5: #include <linux/mm.h>
    11: * - flush_tlb_mm(mm) flushes the specified mm context TLB entries
    40: #define flush_tlb_mm(mm) drop_mmu_context(mm)
  dsemul.h
    99: * dsemul_mm_cleanup() - Cleanup per-mm delay slot 'emulation' state
   100: * @mm: The struct mm_struct to cleanup state for.
   102: * Cleanup state for the given @mm, ensuring that any memory allocated
   104: * before @mm is freed in order to avoid memory leaks.
   107: extern void dsemul_mm_cleanup(struct mm_struct *mm);
   109: static inline void dsemul_mm_cleanup(struct mm_struct *mm)  [argument]
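The two dsemul_mm_cleanup() hits illustrate the usual config-gated stub pattern: an out-of-line implementation when the feature is built in, and a static inline no-op otherwise, so callers need no #ifdefs. A generic sketch of that pattern (CONFIG_MY_FEATURE and my_mm_cleanup() are hypothetical names, not the MIPS dsemul symbols):

#include <linux/mm_types.h>

#ifdef CONFIG_MY_FEATURE
/* real implementation lives in the feature's .c file and frees per-mm state */
extern void my_mm_cleanup(struct mm_struct *mm);
#else
static inline void my_mm_cleanup(struct mm_struct *mm)
{
	/* feature disabled: nothing was allocated, nothing to free */
}
#endif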
/linux-master/arch/um/kernel/skas/
  process.c
     7: #include <linux/sched/mm.h>
    51: if (current->mm == NULL)
    54: return current->mm->context.id.stack;
/linux-master/arch/openrisc/mm/
  fault.c
    14: #include <linux/mm.h>
    50: struct mm_struct *mm;  [local]
    96: mm = tsk->mm;
   104: if (in_interrupt() || !mm)
   110: mmap_read_lock(mm);
   111: vma = find_vma(mm, address);
   132: vma = expand_stack(mm, address);
   192: /* No need to mmap_read_unlock(mm) as we would
   194: * in mm/filema...
    [all ...]
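These hits trace the canonical architecture page-fault flow: bail out when there is no usable mm (atomic context or a kernel thread), then look up the VMA under the mmap read lock and hand the fault to the core. A deliberately simplified sketch of that flow, not the openrisc code itself; it omits stack expansion and the retry loop, assumes FAULT_FLAG_ALLOW_RETRY is not set so the lock is still held after handle_mm_fault(), and handle_user_fault() is a hypothetical name:

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/uaccess.h>

static vm_fault_t handle_user_fault(struct pt_regs *regs,
				    unsigned long address, unsigned int flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	vm_fault_t fault = VM_FAULT_SIGSEGV;

	/* cannot take the mmap lock from atomic context or a kernel thread */
	if (faulthandler_disabled() || !mm)
		return VM_FAULT_SIGSEGV;

	mmap_read_lock(mm);
	vma = find_vma(mm, address);
	if (vma && vma->vm_start <= address)
		fault = handle_mm_fault(vma, address, flags, regs);
	mmap_read_unlock(mm);

	return fault;
}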
/linux-master/arch/parisc/include/asm/
  hugetlb.h
     8: void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
    12: pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
    38: void huge_ptep_set_wrprotect(struct mm_struct *mm,
/linux-master/arch/riscv/include/asm/
  cacheflush.h
     9: #include <linux/mm.h>
    47: #define flush_icache_mm(mm, local) flush_icache_all()
    52: void flush_icache_mm(struct mm_struct *mm, bool local);
/linux-master/arch/arm64/mm/
  pgd.c
     9: #include <linux/mm.h>
    31: pgd_t *pgd_alloc(struct mm_struct *mm)  [argument]
    41: void pgd_free(struct mm_struct *mm, pgd_t *pgd)  [argument]
/linux-master/drivers/gpu/drm/amd/amdkfd/
  kfd_mqd_manager_v9.c
    37: static void update_mqd(struct mqd_manager *mm, void *mqd,
    41: static uint64_t mqd_stride_v9(struct mqd_manager *mm,  [argument]
    44: if (mm->dev->kfd->cwsr_enabled &&
    49: return mm->mqd_size;
    62: static void update_cu_mask(struct mqd_manager *mm, void *mqd,  [argument]
    71: mqd_symmetrically_map_cu_mask(mm,
    80: if (KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 4, 3)) {
   158: static void init_mqd(struct mqd_manager *mm, void **mqd,  [argument]
   207: if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address) {
   223: update_mqd(mm,
   226: load_mqd(struct mqd_manager *mm, void *mqd, uint32_t pipe_id, uint32_t queue_id, struct queue_properties *p, struct mm_struct *mms)  [argument]
   238: update_mqd(struct mqd_manager *mm, void *mqd, struct queue_properties *q, struct mqd_update_info *minfo)  [argument]
   326: get_wave_state(struct mqd_manager *mm, void *mqd, struct queue_properties *q, void __user *ctl_stack, u32 *ctl_stack_used_size, u32 *save_area_used_size)  [argument]
   362: get_checkpoint_info(struct mqd_manager *mm, void *mqd, u32 *ctl_stack_size)  [argument]
   369: checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, void *ctl_stack_dst)  [argument]
   381: restore_mqd(struct mqd_manager *mm, void **mqd, struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr, struct queue_properties *qp, const void *mqd_src, const void *ctl_stack_src, u32 ctl_stack_size)  [argument]
   413: init_mqd_hiq(struct mqd_manager *mm, void **mqd, struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr, struct queue_properties *q)  [argument]
   427: destroy_hiq_mqd(struct mqd_manager *mm, void *mqd, enum kfd_preempt_type type, unsigned int timeout, uint32_t pipe_id, uint32_t queue_id)  [argument]
   446: init_mqd_sdma(struct mqd_manager *mm, void **mqd, struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr, struct queue_properties *q)  [argument]
   465: update_mqd_sdma(struct mqd_manager *mm, void *mqd, struct queue_properties *q, struct mqd_update_info *minfo)  [argument]
   492: checkpoint_mqd_sdma(struct mqd_manager *mm, void *mqd, void *mqd_dst, void *ctl_stack_dst)  [argument]
   504: restore_mqd_sdma(struct mqd_manager *mm, void **mqd, struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr, struct queue_properties *qp, const void *mqd_src, const void *ctl_stack_src, const u32 ctl_stack_size)  [argument]
   528: init_mqd_hiq_v9_4_3(struct mqd_manager *mm, void **mqd, struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr, struct queue_properties *q)  [argument]
   559: hiq_load_mqd_kiq_v9_4_3(struct mqd_manager *mm, void *mqd, uint32_t pipe_id, uint32_t queue_id, struct queue_properties *p, struct mm_struct *mms)  [argument]
   583: destroy_hiq_mqd_v9_4_3(struct mqd_manager *mm, void *mqd, enum kfd_preempt_type type, unsigned int timeout, uint32_t pipe_id, uint32_t queue_id)  [argument]
   621: init_mqd_v9_4_3(struct mqd_manager *mm, void **mqd, struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr, struct queue_properties *q)  [argument]
   687: update_mqd_v9_4_3(struct mqd_manager *mm, void *mqd, struct queue_properties *q, struct mqd_update_info *minfo)  [argument]
   720: destroy_mqd_v9_4_3(struct mqd_manager *mm, void *mqd, enum kfd_preempt_type type, unsigned int timeout, uint32_t pipe_id, uint32_t queue_id)  [argument]
   748: load_mqd_v9_4_3(struct mqd_manager *mm, void *mqd, uint32_t pipe_id, uint32_t queue_id, struct queue_properties *p, struct mm_struct *mms)  [argument]
   775: get_wave_state_v9_4_3(struct mqd_manager *mm, void *mqd, struct queue_properties *q, void __user *ctl_stack, u32 *ctl_stack_used_size, u32 *save_area_used_size)  [argument]
    [all ...]
/linux-master/drivers/gpu/drm/i915/gem/
  i915_gem_lmem.c
    23: offset -= obj->mm.region->region.start;
    25: return io_mapping_map_wc(&obj->mm.region->iomap, offset, size);
    44: struct intel_memory_region *mr = READ_ONCE(obj->mm.region);
    68: struct intel_memory_region *mr = READ_ONCE(obj->mm.region);
   105: return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_LMEM_0],
   141: return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_LMEM_0],
  i915_gemfs.c
    46: i915->mm.gemfs = gemfs;
    59: kern_unmount(i915->mm.gemfs);
/linux-master/arch/arm64/include/asm/
  efi.h
    29: int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
    30: int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md,
   121: static inline void efi_set_pgd(struct mm_struct *mm)  [argument]
   123: __switch_mm(mm);
   126: if (mm != current->active_mm) {
   134: update_saved_ttbr0(current, mm);
/linux-master/arch/arc/mm/
  mmap.c
    11: #include <linux/mm.h>
    13: #include <linux/sched/mm.h>
    28: struct mm_struct *mm = current->mm;  [local]
    48: vma = find_vma(mm, addr);
    56: info.low_limit = mm->mmap_base;
/linux-master/arch/powerpc/mm/book3s64/
  iommu_api.c
    17: #include <linux/mm.h>
    50: bool mm_iommu_preregistered(struct mm_struct *mm)  [argument]
    52: return !list_empty(&mm->context.iommu_group_mem_list);
    56: static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,  [argument]
    66: ret = account_locked_vm(mm, entries, true);
    99: mmap_read_lock(mm);
   117: mmap_read_unlock(mm);
   132: list_for_each_entry_rcu(mem2, &mm->context.iommu_group_mem_list, next,
   165: list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list);
   181: account_locked_vm(mm, locked_entrie...
   186: mm_iommu_new(struct mm_struct *mm, unsigned long ua, unsigned long entries, struct mm_iommu_table_group_mem_t **pmem)  [argument]
   194: mm_iommu_newdev(struct mm_struct *mm, unsigned long ua, unsigned long entries, unsigned long dev_hpa, struct mm_iommu_table_group_mem_t **pmem)  [argument]
   249: mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)  [argument]
   288: mm_iommu_lookup(struct mm_struct *mm, unsigned long ua, unsigned long size)  [argument]
   308: mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries)  [argument]
   354: mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa, unsigned int pageshift, unsigned long *size)  [argument]
   399: mm_iommu_init(struct mm_struct *mm)  [argument]
    [all ...]
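The account_locked_vm() hits at lines 66 and 181 show the standard pinned-memory accounting pattern: charge the pages against the mm's RLIMIT_MEMLOCK before pinning, and drop the charge on every error and teardown path. A hedged sketch of that pairing; pin_region() and unpin_region() are hypothetical helpers, not the mm_iommu_* API:

#include <linux/mm.h>
#include <linux/sched/mm.h>

static long pin_region(struct mm_struct *mm, unsigned long entries)
{
	long ret;

	/* charge 'entries' pages against the mm's RLIMIT_MEMLOCK */
	ret = account_locked_vm(mm, entries, true);
	if (ret)
		return ret;

	/* ... pin the pages and build the translation table here ... */

	return 0;
}

static void unpin_region(struct mm_struct *mm, unsigned long entries)
{
	/* ... unpin the pages ... */

	/* release the charge taken in pin_region() */
	account_locked_vm(mm, entries, false);
}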
/linux-master/arch/sparc/include/asm/
  cacheflush_64.h
     9: #include <linux/mm.h>
    22: do { if ((__mm) == current->mm) flushw_user(); } while(0)
    23: #define flush_cache_dup_mm(mm) flush_cache_mm(mm)
    41: void flush_dcache_folio_all(struct mm_struct *mm, struct folio *folio);
    44: #define flush_dcache_folio_all(mm, folio) flush_dcache_folio_impl(folio)
/linux-master/include/misc/
  cxl-base.h
    37: void cxl_slbia(struct mm_struct *mm);
    44: static inline void cxl_slbia(struct mm_struct *mm) {}  [argument]
/linux-master/arch/s390/include/asm/
  vdso.h
    14: #define VDSO64_SYMBOL(tsk, name) ((tsk)->mm->context.vdso_base + (vdso64_offset_##name))
    16: #define VDSO32_SYMBOL(tsk, name) ((tsk)->mm->context.vdso_base + (vdso32_offset_##name))
/linux-master/drivers/gpu/drm/i915/
  i915_mm.c
    25: #include <linux/mm.h>
    33: struct mm_struct *mm;  [member in struct:remap_pfn]
    59: set_pte_at(r->mm, addr, pte,
    78: set_pte_at(r->mm, addr, pte, pte_mkspecial(pfn_pte(r->pfn, r->prot)));
    92: * Note: this is only safe if the mm semaphore is held when called.
   104: r.mm = vma->vm_mm;
   109: err = apply_to_page_range(r.mm, addr, size, remap_pfn, &r);
   127: * Note: this is only safe if the mm semaphore is held when called.
   134: .mm = vma->vm_mm,
   147: err = apply_to_page_range(r.mm, add...
    [all ...]
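These i915_mm.c hits build on the apply_to_page_range() callback pattern: a pte_fn_t callback is invoked once per PTE slot in the range and fills it in with set_pte_at(). A hedged sketch of that pattern, not the i915 code itself; struct remap_ctx, fill_pte() and remap_pfn_range_sketch() are hypothetical names, and like the original it is only safe while the mmap lock for vma->vm_mm is held:

#include <linux/mm.h>
#include <linux/pgtable.h>

struct remap_ctx {
	struct mm_struct *mm;
	unsigned long pfn;
	pgprot_t prot;
};

static int fill_pte(pte_t *pte, unsigned long addr, void *data)
{
	struct remap_ctx *r = data;

	/* install one special (no struct page) PTE and advance the pfn */
	set_pte_at(r->mm, addr, pte, pte_mkspecial(pfn_pte(r->pfn++, r->prot)));
	return 0;
}

static int remap_pfn_range_sketch(struct vm_area_struct *vma,
				  unsigned long addr, unsigned long pfn,
				  unsigned long size)
{
	struct remap_ctx r = {
		.mm = vma->vm_mm,
		.pfn = pfn,
		.prot = vma->vm_page_prot,
	};

	/* fill_pte() is called for every PTE covering [addr, addr + size) */
	return apply_to_page_range(r.mm, addr, size, fill_pte, &r);
}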
/linux-master/drivers/gpu/drm/nouveau/include/nvkm/core/
  tegra.h
     5: #include <core/mm.h>
    21: * Protects accesses to mm from subsystems
    25: struct nvkm_mm mm;  [member in struct:nvkm_device_tegra::__anon837]
/linux-master/fs/ramfs/
  file-mmu.c
    28: #include <linux/mm.h>
    38: return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);