/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMALLOC_H
#define _LINUX_VMALLOC_H

#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <asm/page.h>		/* pgprot_t */
#include <linux/rbtree.h>
#include <linux/overflow.h>

#include <asm/vmalloc.h>

struct vm_area_struct;		/* vma defining user mapping in mm_types.h */
struct notifier_block;		/* in notifier.h */
struct iov_iter;		/* in uio.h */

/* bits in flags of vmalloc's vm_struct below */
#define VM_IOREMAP		0x00000001	/* ioremap() and friends */
#define VM_ALLOC		0x00000002	/* vmalloc() */
#define VM_MAP			0x00000004	/* vmap()ed pages */
#define VM_USERMAP		0x00000008	/* suitable for remap_vmalloc_range */
#define VM_DMA_COHERENT		0x00000010	/* dma_alloc_coherent */
#define VM_UNINITIALIZED	0x00000020	/* vm_struct is not fully initialized */
#define VM_NO_GUARD		0x00000040	/* ***DANGEROUS*** don't add guard page */
#define VM_KASAN		0x00000080	/* has allocated kasan shadow memory */
#define VM_FLUSH_RESET_PERMS	0x00000100	/* reset direct map and flush TLB on unmap, can't be freed in atomic context */
#define VM_MAP_PUT_PAGES	0x00000200	/* put pages and free array in vfree */
#define VM_ALLOW_HUGE_VMAP	0x00000400	/* Allow for huge pages on archs with HAVE_ARCH_HUGE_VMALLOC */

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
	!defined(CONFIG_KASAN_VMALLOC)
#define VM_DEFER_KMEMLEAK	0x00000800	/* defer kmemleak object creation */
#else
#define VM_DEFER_KMEMLEAK	0
#endif
#define VM_SPARSE		0x00001000	/* sparse vm_area. not all pages are present. */

/* bits [20..32] reserved for arch specific ioremap internals */

/*
 * Maximum alignment for ioremap() regions.
 * Can be overridden by arch-specific value.
 */
#ifndef IOREMAP_MAX_ORDER
#define IOREMAP_MAX_ORDER	(7 + PAGE_SHIFT)	/* 128 pages */
#endif
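
/*
 * Illustrative sketch (assumed, not taken from any particular
 * architecture): an arch can pre-empt this fallback by defining its own
 * limit in asm/vmalloc.h, e.g. capping ioremap alignment at one huge
 * page:
 *
 *	#define IOREMAP_MAX_ORDER	(PMD_SHIFT)
 */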

struct vm_struct {
	struct vm_struct	*next;
	void			*addr;
	unsigned long		size;
	unsigned long		flags;
	struct page		**pages;
#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
	unsigned int		page_order;
#endif
	unsigned int		nr_pages;
	phys_addr_t		phys_addr;
	const void		*caller;
};
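
/*
 * Usage sketch (illustrative, assuming @p is a vmalloc'ed pointer): the
 * area backing an allocation can be looked up and inspected with
 * find_vm_area(), declared later in this header:
 *
 *	struct vm_struct *area = find_vm_area(p);
 *
 *	if (area && (area->flags & VM_ALLOC))
 *		pr_info("%u pages at %p\n", area->nr_pages, area->addr);
 */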

struct vmap_area {
	unsigned long va_start;
	unsigned long va_end;

	struct rb_node rb_node;         /* address sorted rbtree */
	struct list_head list;          /* address sorted list */

	/*
	 * The following two fields can share storage because a vmap_area
	 * object is only ever in one of two trees at a time:
	 *    1) the "free" tree (root is free_vmap_area_root), or
	 *    2) the "busy" tree (root is vmap_area_root)
	 */
	union {
		unsigned long subtree_max_size; /* in "free" tree */
		struct vm_struct *vm;           /* in "busy" tree */
	};
	unsigned long flags; /* mark type of vm_map_ram area */
};
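
/*
 * Sketch (illustrative): only areas in the "busy" tree, e.g. those
 * returned by find_vmap_area() (declared later in this header), have a
 * valid ->vm member; for areas in the "free" tree the union holds
 * subtree_max_size instead.
 *
 *	struct vmap_area *va = find_vmap_area(addr);
 *
 *	if (va && va->vm)
 *		pr_info("busy: %lx-%lx\n", va->va_start, va->va_end);
 */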

/* archs that select HAVE_ARCH_HUGE_VMAP should override one or more of these */
#ifndef arch_vmap_p4d_supported
static inline bool arch_vmap_p4d_supported(pgprot_t prot)
{
	return false;
}
#endif

#ifndef arch_vmap_pud_supported
static inline bool arch_vmap_pud_supported(pgprot_t prot)
{
	return false;
}
#endif

#ifndef arch_vmap_pmd_supported
static inline bool arch_vmap_pmd_supported(pgprot_t prot)
{
	return false;
}
#endif

#ifndef arch_vmap_pte_range_map_size
static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, unsigned long end,
							 u64 pfn, unsigned int max_page_shift)
{
	return PAGE_SIZE;
}
#endif

#ifndef arch_vmap_pte_supported_shift
static inline int arch_vmap_pte_supported_shift(unsigned long size)
{
	return PAGE_SHIFT;
}
#endif

#ifndef arch_vmap_pgprot_tagged
static inline pgprot_t arch_vmap_pgprot_tagged(pgprot_t prot)
{
	return prot;
}
#endif
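
/*
 * Illustrative sketch (assumed, not taken from any particular
 * architecture): an arch selecting HAVE_ARCH_HUGE_VMAP overrides a hook
 * by defining the matching macro in its asm/vmalloc.h, e.g.:
 *
 *	#define arch_vmap_pmd_supported arch_vmap_pmd_supported
 *	static inline bool arch_vmap_pmd_supported(pgprot_t prot)
 *	{
 *		return true;
 *	}
 */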

/*
 *	High-level APIs for driver use
 */
extern void vm_unmap_ram(const void *mem, unsigned int count);
extern void *vm_map_ram(struct page **pages, unsigned int count, int node);
extern void vm_unmap_aliases(void);
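
/*
 * Usage sketch (illustrative, assuming @pages holds @count allocated
 * pages): vm_map_ram() creates a transient mapping that must be torn
 * down with the same @count:
 *
 *	void *mem = vm_map_ram(pages, count, NUMA_NO_NODE);
 *
 *	if (mem) {
 *		memset(mem, 0, count * PAGE_SIZE);
 *		vm_unmap_ram(mem, count);
 *	}
 */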

#ifdef CONFIG_MMU
extern unsigned long vmalloc_nr_pages(void);
#else
static inline unsigned long vmalloc_nr_pages(void) { return 0; }
#endif

extern void *vmalloc(unsigned long size) __alloc_size(1);
extern void *vzalloc(unsigned long size) __alloc_size(1);
extern void *vmalloc_user(unsigned long size) __alloc_size(1);
extern void *vmalloc_node(unsigned long size, int node) __alloc_size(1);
extern void *vzalloc_node(unsigned long size, int node) __alloc_size(1);
extern void *vmalloc_32(unsigned long size) __alloc_size(1);
extern void *vmalloc_32_user(unsigned long size) __alloc_size(1);
extern void *__vmalloc(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, unsigned long vm_flags, int node,
			const void *caller) __alloc_size(1);
void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
		int node, const void *caller) __alloc_size(1);
void *vmalloc_huge(unsigned long size, gfp_t gfp_mask) __alloc_size(1);

extern void *__vmalloc_array(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
extern void *vmalloc_array(size_t n, size_t size) __alloc_size(1, 2);
extern void *__vcalloc(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
extern void *vcalloc(size_t n, size_t size) __alloc_size(1, 2);
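
/*
 * Usage sketch (illustrative; struct foo is an assumed type): vcalloc()
 * allocates a zeroed array with overflow checking on n * sizeof(*tbl);
 * every successful allocation is paired with vfree(), declared below:
 *
 *	struct foo *tbl = vcalloc(n, sizeof(*tbl));
 *
 *	if (!tbl)
 *		return -ENOMEM;
 *	...
 *	vfree(tbl);
 */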

extern void vfree(const void *addr);
extern void vfree_atomic(const void *addr);

extern void *vmap(struct page **pages, unsigned int count,
			unsigned long flags, pgprot_t prot);
void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot);
extern void vunmap(const void *addr);
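
/*
 * Usage sketch (illustrative, assuming @pages holds @count pages): vmap()
 * sets up a long-lived virtually contiguous mapping, released with
 * vunmap():
 *
 *	void *addr = vmap(pages, count, VM_MAP, PAGE_KERNEL);
 *
 *	if (addr) {
 *		...
 *		vunmap(addr);
 *	}
 */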

extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
				       unsigned long uaddr, void *kaddr,
				       unsigned long pgoff, unsigned long size);

extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
							unsigned long pgoff);
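
/*
 * Usage sketch (illustrative; foo_mmap() and foo_buf are assumed names):
 * the usual way to export a buffer to userspace is to allocate it with
 * vmalloc_user() (which sets VM_USERMAP) and call remap_vmalloc_range()
 * from the driver's ->mmap() handler:
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_vmalloc_range(vma, foo_buf, 0);
 *	}
 */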

/*
 * Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED
 * values and let generic vmalloc and ioremap code know when
 * arch_sync_kernel_mappings() needs to be called.
 */
#ifndef ARCH_PAGE_TABLE_SYNC_MASK
#define ARCH_PAGE_TABLE_SYNC_MASK 0
#endif

/*
 * There is no default implementation for arch_sync_kernel_mappings(). The
 * compiler is relied upon to optimize calls out when
 * ARCH_PAGE_TABLE_SYNC_MASK is 0.
 */
void arch_sync_kernel_mappings(unsigned long start, unsigned long end);
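
/*
 * Illustrative sketch (assumed, not taken from any particular
 * architecture): an arch whose kernel mappings can go out of sync across
 * page-table roots might define, in its arch headers:
 *
 *	#define ARCH_PAGE_TABLE_SYNC_MASK \
 *		(PGTBL_PMD_MODIFIED | PGTBL_PTE_MODIFIED)
 *
 * and supply a matching arch_sync_kernel_mappings() implementation.
 */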

/*
 *	Low-level APIs (not for driver use!)
 */

static inline size_t get_vm_area_size(const struct vm_struct *area)
{
	if (!(area->flags & VM_NO_GUARD))
		/* return actual size without guard page */
		return area->size - PAGE_SIZE;
	else
		return area->size;
}

extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
extern struct vm_struct *get_vm_area_caller(unsigned long size,
					unsigned long flags, const void *caller);
extern struct vm_struct *__get_vm_area_caller(unsigned long size,
					unsigned long flags,
					unsigned long start, unsigned long end,
					const void *caller);
void free_vm_area(struct vm_struct *area);
extern struct vm_struct *remove_vm_area(const void *addr);
extern struct vm_struct *find_vm_area(const void *addr);
struct vmap_area *find_vmap_area(unsigned long addr);
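
/*
 * Usage sketch (illustrative): reserve vmalloc address space without
 * backing pages, then release it again:
 *
 *	struct vm_struct *area = get_vm_area(SZ_1M, VM_IOREMAP);
 *
 *	if (!area)
 *		return -ENOMEM;
 *	...
 *	free_vm_area(area);
 */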

static inline bool is_vm_area_hugepages(const void *addr)
{
	/*
	 * This is not a 100% reliable indicator that the area is mapped
	 * with > PAGE_SIZE page table entries: nothing prevents an
	 * architecture from reporting that larger sizes are available and
	 * then deciding not to use them. It only reflects the size of the
	 * physical pages allocated by the vmalloc layer.
	 */
#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
	return find_vm_area(addr)->page_order > 0;
#else
	return false;
#endif
}
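
/*
 * Usage sketch (illustrative): a caller that opted in via vmalloc_huge()
 * can check whether huge pages were actually used:
 *
 *	void *p = vmalloc_huge(SZ_8M, GFP_KERNEL);
 *
 *	if (p && is_vm_area_hugepages(p))
 *		pr_info("mapped with huge pages\n");
 */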

#ifdef CONFIG_MMU
int vm_area_map_pages(struct vm_struct *area, unsigned long start,
		      unsigned long end, struct page **pages);
void vm_area_unmap_pages(struct vm_struct *area, unsigned long start,
			 unsigned long end);
void vunmap_range(unsigned long addr, unsigned long end);
static inline void set_vm_flush_reset_perms(void *addr)
{
	struct vm_struct *vm = find_vm_area(addr);

	if (vm)
		vm->flags |= VM_FLUSH_RESET_PERMS;
}

#else
static inline void set_vm_flush_reset_perms(void *addr)
{
}
#endif
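
/*
 * Usage sketch (illustrative): code that changes the permissions of a
 * vmalloc'ed region (e.g. makes it executable) should set
 * VM_FLUSH_RESET_PERMS right after allocating, so that vfree() resets
 * the direct map and flushes the TLB:
 *
 *	void *p = __vmalloc(size, GFP_KERNEL);
 *
 *	if (p)
 *		set_vm_flush_reset_perms(p);
 */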

/* for /proc/kcore */
extern long vread_iter(struct iov_iter *iter, const char *addr, size_t count);

/*
 *	Internals. Don't use!
 */
extern __init void vm_area_add_early(struct vm_struct *vm);
extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);

#ifdef CONFIG_SMP
# ifdef CONFIG_MMU
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align);

void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
# else
static inline struct vm_struct **
pcpu_get_vm_areas(const unsigned long *offsets,
		const size_t *sizes, int nr_vms,
		size_t align)
{
	return NULL;
}

static inline void
pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
}
# endif
#endif

#ifdef CONFIG_MMU
#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
#else
#define VMALLOC_TOTAL 0UL
#endif

int register_vmap_purge_notifier(struct notifier_block *nb);
int unregister_vmap_purge_notifier(struct notifier_block *nb);
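
/*
 * Usage sketch (illustrative; foo_purge() and foo_nb are assumed names):
 * a subsystem caching vmalloc space can drop its caches when vmap areas
 * are purged under address-space pressure:
 *
 *	static int foo_purge(struct notifier_block *nb, unsigned long event,
 *			     void *data)
 *	{
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = { .notifier_call = foo_purge };
 *
 *	register_vmap_purge_notifier(&foo_nb);
 */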

#if defined(CONFIG_MMU) && defined(CONFIG_PRINTK)
bool vmalloc_dump_obj(void *object);
#else
static inline bool vmalloc_dump_obj(void *object) { return false; }
#endif

#endif /* _LINUX_VMALLOC_H */