/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 */

#ifndef _ASM_RISCV_PGALLOC_H
#define _ASM_RISCV_PGALLOC_H

#include <linux/mm.h>
#include <asm/tlb.h>

#ifdef CONFIG_MMU
#define __HAVE_ARCH_PUD_ALLOC_ONE
#define __HAVE_ARCH_PUD_FREE
#include <asm-generic/pgalloc.h>

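/*
 * Install a newly allocated pte page into a pmd entry: convert the pte
 * page's virtual address to a PFN and write it with _PAGE_TABLE
 * permissions, marking the entry as a pointer to the next level.
 */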
static inline void pmd_populate_kernel(struct mm_struct *mm,
	pmd_t *pmd, pte_t *pte)
{
	unsigned long pfn = virt_to_pfn(pte);

	set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
}

static inline void pmd_populate(struct mm_struct *mm,
	pmd_t *pmd, pgtable_t pte)
{
	unsigned long pfn = virt_to_pfn(page_address(pte));

	set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
}

#ifndef __PAGETABLE_PMD_FOLDED
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	unsigned long pfn = virt_to_pfn(pmd);

	set_pud(pud, __pud((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
}

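/*
 * The pud level only exists when at least sv48 is in use: if
 * pgtable_l4_enabled is false the pud is folded into the p4d at
 * runtime and there is nothing to write here.
 */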
static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
{
	if (pgtable_l4_enabled) {
		unsigned long pfn = virt_to_pfn(pud);

		set_p4d(p4d, __p4d((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
	}
}

static inline void p4d_populate_safe(struct mm_struct *mm, p4d_t *p4d,
				     pud_t *pud)
{
	if (pgtable_l4_enabled) {
		unsigned long pfn = virt_to_pfn(pud);

		set_p4d_safe(p4d,
			     __p4d((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
	}
}

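/*
 * Likewise, the p4d level only exists with sv57: when pgtable_l5_enabled
 * is false the p4d is folded into the pgd and the store is skipped.
 */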
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
{
	if (pgtable_l5_enabled) {
		unsigned long pfn = virt_to_pfn(p4d);

		set_pgd(pgd, __pgd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
	}
}

static inline void pgd_populate_safe(struct mm_struct *mm, pgd_t *pgd,
				     p4d_t *p4d)
{
	if (pgtable_l5_enabled) {
		unsigned long pfn = virt_to_pfn(p4d);

		set_pgd_safe(pgd,
			     __pgd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
	}
}

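/*
 * pud pages are only allocated and freed when the pud level exists
 * (pgtable_l4_enabled); otherwise the level is folded at runtime and no
 * page is needed.
 */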
#define pud_alloc_one pud_alloc_one
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	if (pgtable_l4_enabled)
		return __pud_alloc_one(mm, addr);

	return NULL;
}

#define pud_free pud_free
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	if (pgtable_l4_enabled)
		__pud_free(mm, pud);
}

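/*
 * Page table pages freed through the mmu_gather must not be reused while
 * a lockless walker (e.g. fast GUP) may still hold a pointer to them.
 * When remote TLB shootdowns are done with IPIs, the IPI synchronizes
 * against walkers running with interrupts disabled, so the page can be
 * handed back as an ordinary page; otherwise defer the free through the
 * RCU-safe tlb_remove_ptdesc() path.
 */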
static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
				  unsigned long addr)
{
	if (pgtable_l4_enabled) {
		struct ptdesc *ptdesc = virt_to_ptdesc(pud);

		pagetable_pud_dtor(ptdesc);
		if (riscv_use_ipi_for_rfence())
			tlb_remove_page_ptdesc(tlb, ptdesc);
		else
			tlb_remove_ptdesc(tlb, ptdesc);
	}
}

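/*
 * p4d pages are only needed with sv57: allocate a zeroed page directly,
 * using the kernel GFP flags for init_mm and the user flags otherwise.
 */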
#define p4d_alloc_one p4d_alloc_one
static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	if (pgtable_l5_enabled) {
		gfp_t gfp = GFP_PGTABLE_USER;

		if (mm == &init_mm)
			gfp = GFP_PGTABLE_KERNEL;
		return (p4d_t *)get_zeroed_page(gfp);
	}

	return NULL;
}

static inline void __p4d_free(struct mm_struct *mm, p4d_t *p4d)
{
	BUG_ON((unsigned long)p4d & (PAGE_SIZE-1));
	free_page((unsigned long)p4d);
}

#define p4d_free p4d_free
static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
{
	if (pgtable_l5_enabled)
		__p4d_free(mm, p4d);
}

static inline void __p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
				  unsigned long addr)
{
	if (pgtable_l5_enabled) {
		if (riscv_use_ipi_for_rfence())
			tlb_remove_page_ptdesc(tlb, virt_to_ptdesc(p4d));
		else
			tlb_remove_ptdesc(tlb, virt_to_ptdesc(p4d));
	}
}
#endif /* __PAGETABLE_PMD_FOLDED */

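/*
 * Copy the pgd entries that map the kernel portion of the address space
 * from init_mm into a new pgd, so every process shares the same kernel
 * mappings.
 */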
static inline void sync_kernel_mappings(pgd_t *pgd)
{
	memcpy(pgd + USER_PTRS_PER_PGD,
	       init_mm.pgd + USER_PTRS_PER_PGD,
	       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
}

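/*
 * Allocate a pgd for a new mm: clear the user entries and populate the
 * kernel half from init_mm.
 */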
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;

	pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
	if (likely(pgd != NULL)) {
		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
		/* Copy kernel mappings */
		sync_kernel_mappings(pgd);
	}
	return pgd;
}

#ifndef __PAGETABLE_PMD_FOLDED

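/*
 * Free a pmd page through the mmu_gather: run the ptdesc destructor and
 * queue the page on the batch, choosing the IPI-synchronized or
 * RCU-deferred path as above.
 */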
static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
				  unsigned long addr)
{
	struct ptdesc *ptdesc = virt_to_ptdesc(pmd);

	pagetable_pmd_dtor(ptdesc);
	if (riscv_use_ipi_for_rfence())
		tlb_remove_page_ptdesc(tlb, ptdesc);
	else
		tlb_remove_ptdesc(tlb, ptdesc);
}

#endif /* __PAGETABLE_PMD_FOLDED */

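/*
 * Same for pte pages; these are needed in every paging mode, so there is
 * no runtime level check here.
 */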
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
				  unsigned long addr)
{
	struct ptdesc *ptdesc = page_ptdesc(pte);

	pagetable_pte_dtor(ptdesc);
	if (riscv_use_ipi_for_rfence())
		tlb_remove_page_ptdesc(tlb, ptdesc);
	else
		tlb_remove_ptdesc(tlb, ptdesc);
}
#endif /* CONFIG_MMU */

#endif /* _ASM_RISCV_PGALLOC_H */