/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#ifndef _ASM_PAGE_H
#define _ASM_PAGE_H

#include <linux/const.h>
#include <asm/addrspace.h>

/*
 * PAGE_SHIFT determines the page size
 */
#define PAGE_SHIFT	CONFIG_PAGE_SHIFT
#define PAGE_SIZE	(_AC(1, UL) << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

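/*
 * A huge page spans one PTE table's worth of base pages: with 8-byte
 * PTEs a page holds 2^(PAGE_SHIFT - 3) entries, hence the extra
 * (PAGE_SHIFT - 3) in HPAGE_SHIFT.
 */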
#define HPAGE_SHIFT	(PAGE_SHIFT + PAGE_SHIFT - 3)
#define HPAGE_SIZE	(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK	(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)

#ifndef __ASSEMBLY__

#include <linux/kernel.h>
#include <linux/pfn.h>

/*
 * It's normally defined only for FLATMEM config but it's
 * used in our early mem init code for all memory models.
 * So always define it.
 */
#define ARCH_PFN_OFFSET	PFN_UP(PHYS_OFFSET)

extern void clear_page(void *page);
extern void copy_page(void *to, void *from);

#define clear_user_page(page, vaddr, pg)	clear_page(page)
#define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)

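/* Alignment mask applied to shared mappings; set up in arch/loongarch/mm/mmap.c. */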
extern unsigned long shm_align_mask;

struct page;
struct vm_area_struct;
void copy_user_highpage(struct page *to, struct page *from,
	      unsigned long vaddr, struct vm_area_struct *vma);

#define __HAVE_ARCH_COPY_USER_HIGHPAGE

typedef struct { unsigned long pte; } pte_t;
#define pte_val(x)	((x).pte)
#define __pte(x)	((pte_t) { (x) })
typedef struct page *pgtable_t;

typedef struct { unsigned long pgd; } pgd_t;
#define pgd_val(x)	((x).pgd)
#define __pgd(x)	((pgd_t) { (x) })

/*
 * Manipulate page protection bits
 */
typedef struct { unsigned long pgprot; } pgprot_t;
#define pgprot_val(x)	((x).pgprot)
#define __pgprot(x)	((pgprot_t) { (x) })
#define pte_pgprot(x)	__pgprot(pte_val(x) & ~_PFN_MASK)

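/* Return the other PTE of the even/odd pair containing x (XOR with sizeof(pte_t)). */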
#define ptep_buddy(x)	((pte_t *)((unsigned long)(x) ^ sizeof(pte_t)))

/*
 * __pa()/__va() should be used only during mem init.
 */
#define __pa(x)		PHYSADDR(x)
#define __va(x)		((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET))

#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
#define sym_to_pfn(x)		__phys_to_pfn(__pa_symbol(x))

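/*
 * Kernel virtual address to struct page helpers: the DMW variant covers
 * addresses in a Direct Mapped Window, the TLB variant handles
 * TLB-mapped addresses by walking the kernel page tables.
 */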
struct page *dmw_virt_to_page(unsigned long kaddr);
struct page *tlb_virt_to_page(unsigned long kaddr);

#define pfn_to_phys(pfn)	__pfn_to_phys(pfn)
#define phys_to_pfn(paddr)	__phys_to_pfn(paddr)

#define page_to_phys(page)	pfn_to_phys(page_to_pfn(page))
#define phys_to_page(paddr)	pfn_to_page(phys_to_pfn(paddr))

#ifndef CONFIG_KFENCE

#define page_to_virt(page)	__va(page_to_phys(page))
#define virt_to_page(kaddr)	phys_to_page(__pa(kaddr))

#else

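/*
 * With KFENCE the pool may be mapped through the page tables rather than
 * the DMW, so the linear-map shortcuts above do not always hold:
 * page_to_virt() falls back to page_address() once the pool exists
 * (hence WANT_PAGE_VIRTUAL), and virt_to_page() picks the DMW or the
 * TLB-based translation depending on the address.
 */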
#define WANT_PAGE_VIRTUAL

#define page_to_virt(page)								\
({											\
	extern char *__kfence_pool;							\
	(__kfence_pool == NULL) ? __va(page_to_phys(page)) : page_address(page);	\
})

#define virt_to_page(kaddr)								\
({											\
	(likely((unsigned long)kaddr < vm_map_base)) ?					\
	dmw_virt_to_page((unsigned long)kaddr) : tlb_virt_to_page((unsigned long)kaddr);\
})

#endif

#define pfn_to_virt(pfn)	page_to_virt(pfn_to_page(pfn))
#define virt_to_pfn(kaddr)	page_to_pfn(virt_to_page(kaddr))

extern int __virt_addr_valid(volatile void *kaddr);
#define virt_addr_valid(kaddr)	__virt_addr_valid((volatile void *)(kaddr))

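/* Default protection bits for data mappings; honour READ_IMPLIES_EXEC. */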
#define VM_DATA_DEFAULT_FLAGS \
	(VM_READ | VM_WRITE | \
	 ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
	 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_PAGE_H */