/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_GENERIC_PGALLOC_H
#define __ASM_GENERIC_PGALLOC_H

#ifdef CONFIG_MMU

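/*
 * User page tables are allocated with __GFP_ACCOUNT so that they are
 * charged to the allocating task's memory cgroup; kernel page tables
 * are not accounted.
 */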
#define GFP_PGTABLE_KERNEL	(GFP_KERNEL | __GFP_ZERO)
#define GFP_PGTABLE_USER	(GFP_PGTABLE_KERNEL | __GFP_ACCOUNT)

/**
 * __pte_alloc_one_kernel - allocate memory for a PTE-level kernel page table
 * @mm: the mm_struct of the current context
 *
 * This function is intended for architectures that need
 * anything beyond simple page allocation.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
static inline pte_t *__pte_alloc_one_kernel(struct mm_struct *mm)
{
	struct ptdesc *ptdesc = pagetable_alloc(GFP_PGTABLE_KERNEL &
			~__GFP_HIGHMEM, 0);

	if (!ptdesc)
		return NULL;
	return ptdesc_address(ptdesc);
}
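
/*
 * Illustrative sketch (not part of the generic code): an architecture that
 * needs more than the plain allocation can define
 * __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL and wrap the helper above; the
 * arch_prepare_kernel_pte() hook below is a made-up placeholder:
 *
 *	pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
 *	{
 *		pte_t *pte = __pte_alloc_one_kernel(mm);
 *
 *		if (pte)
 *			arch_prepare_kernel_pte(pte);
 *		return pte;
 *	}
 */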

#ifndef __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
/**
 * pte_alloc_one_kernel - allocate memory for a PTE-level kernel page table
 * @mm: the mm_struct of the current context
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
	return __pte_alloc_one_kernel(mm);
}
#endif

/**
 * pte_free_kernel - free PTE-level kernel page table memory
 * @mm: the mm_struct of the current context
 * @pte: pointer to the memory containing the page table
 */
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	pagetable_free(virt_to_ptdesc(pte));
}

/**
 * __pte_alloc_one - allocate memory for a PTE-level user page table
 * @mm: the mm_struct of the current context
 * @gfp: GFP flags to use for the allocation
 *
 * Allocates memory for a page table and ptdesc and runs pagetable_pte_ctor().
 *
 * This function is intended for architectures that need
 * anything beyond simple page allocation or must have custom GFP flags.
 *
 * Return: `struct page` referencing the ptdesc or %NULL on error
 */
static inline pgtable_t __pte_alloc_one(struct mm_struct *mm, gfp_t gfp)
{
	struct ptdesc *ptdesc;

	ptdesc = pagetable_alloc(gfp, 0);
	if (!ptdesc)
		return NULL;
	if (!pagetable_pte_ctor(ptdesc)) {
		pagetable_free(ptdesc);
		return NULL;
	}

	return ptdesc_page(ptdesc);
}
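
/*
 * Illustrative sketch: an architecture that must pass custom GFP flags can
 * define __HAVE_ARCH_PTE_ALLOC_ONE and call the helper above directly, for
 * instance to allow user PTE pages in highmem where that is supported:
 *
 *	pgtable_t pte_alloc_one(struct mm_struct *mm)
 *	{
 *		return __pte_alloc_one(mm, GFP_PGTABLE_USER | __GFP_HIGHMEM);
 *	}
 */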

#ifndef __HAVE_ARCH_PTE_ALLOC_ONE
/**
 * pte_alloc_one - allocate a page for a PTE-level user page table
 * @mm: the mm_struct of the current context
 *
 * Allocates memory for a page table and ptdesc and runs pagetable_pte_ctor().
 *
 * Return: `struct page` referencing the ptdesc or %NULL on error
 */
static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
{
	return __pte_alloc_one(mm, GFP_PGTABLE_USER);
}
#endif

/*
 * Should really implement gc for free page table pages. This could be
 * done with a reference count in struct page.
 */

/**
 * pte_free - free PTE-level user page table memory
 * @mm: the mm_struct of the current context
 * @pte_page: the `struct page` referencing the ptdesc
 */
static inline void pte_free(struct mm_struct *mm, struct page *pte_page)
{
	struct ptdesc *ptdesc = page_ptdesc(pte_page);

	pagetable_pte_dtor(ptdesc);
	pagetable_free(ptdesc);
}
#if CONFIG_PGTABLE_LEVELS > 2

#ifndef __HAVE_ARCH_PMD_ALLOC_ONE
/**
 * pmd_alloc_one - allocate memory for a PMD-level page table
 * @mm: the mm_struct of the current context
 * @addr: the virtual address (unused by the generic implementation)
 *
 * Allocates memory for a page table and ptdesc and runs pagetable_pmd_ctor().
 *
 * Allocations use %GFP_PGTABLE_USER in user context and
 * %GFP_PGTABLE_KERNEL in kernel context.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	struct ptdesc *ptdesc;
	gfp_t gfp = GFP_PGTABLE_USER;

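	/* Kernel (init_mm) page tables are not charged to a memory cgroup. */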
	if (mm == &init_mm)
		gfp = GFP_PGTABLE_KERNEL;
	ptdesc = pagetable_alloc(gfp, 0);
	if (!ptdesc)
		return NULL;
	if (!pagetable_pmd_ctor(ptdesc)) {
		pagetable_free(ptdesc);
		return NULL;
	}
	return ptdesc_address(ptdesc);
}
#endif

#ifndef __HAVE_ARCH_PMD_FREE
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	struct ptdesc *ptdesc = virt_to_ptdesc(pmd);

	BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
	pagetable_pmd_dtor(ptdesc);
	pagetable_free(ptdesc);
}
#endif

#endif /* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3

static inline pud_t *__pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	gfp_t gfp = GFP_PGTABLE_USER;
	struct ptdesc *ptdesc;

	if (mm == &init_mm)
		gfp = GFP_PGTABLE_KERNEL;
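	/*
	 * PUD tables are accessed through the kernel direct map, so they
	 * must not be allocated from highmem.
	 */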
	gfp &= ~__GFP_HIGHMEM;

	ptdesc = pagetable_alloc(gfp, 0);
	if (!ptdesc)
		return NULL;

	pagetable_pud_ctor(ptdesc);
	return ptdesc_address(ptdesc);
}

#ifndef __HAVE_ARCH_PUD_ALLOC_ONE
/**
 * pud_alloc_one - allocate memory for a PUD-level page table
 * @mm: the mm_struct of the current context
 * @addr: the virtual address (unused by the generic implementation)
 *
 * Allocates memory for a page table using %GFP_PGTABLE_USER in user context
 * and %GFP_PGTABLE_KERNEL in kernel context.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return __pud_alloc_one(mm, addr);
}
#endif

static inline void __pud_free(struct mm_struct *mm, pud_t *pud)
{
	struct ptdesc *ptdesc = virt_to_ptdesc(pud);

	BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
	pagetable_pud_dtor(ptdesc);
	pagetable_free(ptdesc);
}

#ifndef __HAVE_ARCH_PUD_FREE
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	__pud_free(mm, pud);
}
#endif
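
/*
 * Illustrative sketch: the __pud_alloc_one()/__pud_free() pair lets an
 * architecture that defines __HAVE_ARCH_PUD_FREE add its own bookkeeping
 * around the generic logic; arch_release_pud() below is a made-up
 * placeholder:
 *
 *	static inline void pud_free(struct mm_struct *mm, pud_t *pud)
 *	{
 *		arch_release_pud(pud);
 *		__pud_free(mm, pud);
 *	}
 */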

#endif /* CONFIG_PGTABLE_LEVELS > 3 */

#ifndef __HAVE_ARCH_PGD_FREE
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pagetable_free(virt_to_ptdesc(pgd));
}
#endif

#endif /* CONFIG_MMU */

#endif /* __ASM_GENERIC_PGALLOC_H */