/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#ifndef __ASM_LOONGARCH_KVM_MMU_H__
#define __ASM_LOONGARCH_KVM_MMU_H__

#include <linux/kvm_host.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>

/*
 * KVM_MMU_CACHE_MIN_PAGES is the number of GPA page table translation levels
 * for which pages need to be cached, i.e. one page for each level below the
 * root directory.
 */
#define KVM_MMU_CACHE_MIN_PAGES	(CONFIG_PGTABLE_LEVELS - 1)

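/*
 * Walker flags carried in kvm_ptw_ctx::flag; their consumers live in
 * arch/loongarch/kvm/mmu.c:
 * _KVM_FLUSH_PGTABLE - the walk frees page table pages, so a flush is
 *                      required (tested via kvm_need_flush())
 * _KVM_HAS_PGMASK    - ctx->mask and ctx->gfn are valid and restrict the
 *                      walk to the pages selected by the mask
 */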
#define _KVM_FLUSH_PGTABLE	0x1
#define _KVM_HAS_PGMASK		0x2
#define kvm_pfn_pte(pfn, prot)	(((pfn) << PFN_PTE_SHIFT) | pgprot_val(prot))
#define kvm_pte_pfn(x)		((phys_addr_t)(((x) & _PFN_MASK) >> PFN_PTE_SHIFT))

typedef unsigned long kvm_pte_t;
typedef struct kvm_ptw_ctx kvm_ptw_ctx;
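/* Per-entry callback invoked by the page table walker */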
typedef int (*kvm_pte_ops)(kvm_pte_t *pte, phys_addr_t addr, kvm_ptw_ctx *ctx);

struct kvm_ptw_ctx {
	kvm_pte_ops     ops;
	unsigned long   flag;

	/* used by kvm_arch_mmu_enable_log_dirty_pt_masked() */
	unsigned long   mask;
	unsigned long   gfn;

	/* page walk MMU info */
	unsigned int    level;
	unsigned long   pgtable_shift;
	unsigned long   invalid_entry;
	unsigned long   *invalid_ptes;
	unsigned int    *pte_shifts;
	void		*opaque;

	/* list of pte table pages to be freed */
	struct list_head list;
};

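/* Allocate the root page of the GPA page table; implemented in kvm/mmu.c */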
kvm_pte_t *kvm_pgd_alloc(void);

/*
 * Publish a pte with WRITE_ONCE() so that concurrent lockless readers see
 * either the old or the new value, never a torn one.
 */
static inline void kvm_set_pte(kvm_pte_t *ptep, kvm_pte_t val)
{
	WRITE_ONCE(*ptep, val);
}

static inline int kvm_pte_write(kvm_pte_t pte) { return pte & _PAGE_WRITE; }
static inline int kvm_pte_dirty(kvm_pte_t pte) { return pte & _PAGE_DIRTY; }
static inline int kvm_pte_young(kvm_pte_t pte) { return pte & _PAGE_ACCESSED; }
static inline int kvm_pte_huge(kvm_pte_t pte) { return pte & _PAGE_HUGE; }

static inline kvm_pte_t kvm_pte_mkyoung(kvm_pte_t pte)
{
	return pte | _PAGE_ACCESSED;
}

static inline kvm_pte_t kvm_pte_mkold(kvm_pte_t pte)
{
	return pte & ~_PAGE_ACCESSED;
}

static inline kvm_pte_t kvm_pte_mkdirty(kvm_pte_t pte)
{
	return pte | _PAGE_DIRTY;
}

static inline kvm_pte_t kvm_pte_mkclean(kvm_pte_t pte)
{
	return pte & ~_PAGE_DIRTY;
}

static inline kvm_pte_t kvm_pte_mkhuge(kvm_pte_t pte)
{
	return pte | _PAGE_HUGE;
}

static inline kvm_pte_t kvm_pte_mksmall(kvm_pte_t pte)
{
	return pte & ~_PAGE_HUGE;
}

static inline int kvm_need_flush(kvm_ptw_ctx *ctx)
{
	return ctx->flag & _KVM_FLUSH_PGTABLE;
}

static inline kvm_pte_t *kvm_pgtable_offset(kvm_ptw_ctx *ctx, kvm_pte_t *table,
					phys_addr_t addr)
{
	return table + ((addr >> ctx->pgtable_shift) & (PTRS_PER_PTE - 1));
}

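/*
 * kvm_pgtable_addr_end() returns the end of the range covered by the entry
 * containing @addr, clamped to @end. As an illustrative example (assuming
 * 16KB base pages, where the PMD-level shift is 25): each entry then spans
 * 32MB, so for addr == 0x3000000 the walk is split at the 0x4000000 boundary
 * unless @end comes first.
 */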
static inline phys_addr_t kvm_pgtable_addr_end(kvm_ptw_ctx *ctx,
				phys_addr_t addr, phys_addr_t end)
{
	phys_addr_t boundary, size;

	size = 0x1UL << ctx->pgtable_shift;
	boundary = (addr + size) & ~(size - 1);
	/* compare with -1 so that a boundary which wrapped to 0 still clamps to end */
	return (boundary - 1 < end - 1) ? boundary : end;
}

/*
 * Level 0 is the last (pte) level, where presence is tracked with
 * _PAGE_PRESENT; directory levels hold either a table pointer or the
 * level's invalid entry.
 */
static inline int kvm_pte_present(kvm_ptw_ctx *ctx, kvm_pte_t *entry)
{
	if (!ctx || ctx->level == 0)
		return !!(*entry & _PAGE_PRESENT);

	return *entry != ctx->invalid_entry;
}

static inline int kvm_pte_none(kvm_ptw_ctx *ctx, kvm_pte_t *entry)
{
	return *entry == ctx->invalid_entry;
}

/* Descend one page table level: update the shift and invalid entry to match */
static inline void kvm_ptw_enter(kvm_ptw_ctx *ctx)
{
	ctx->level--;
	ctx->pgtable_shift = ctx->pte_shifts[ctx->level];
	ctx->invalid_entry = ctx->invalid_ptes[ctx->level];
}

/* Return to the parent page table level */
static inline void kvm_ptw_exit(kvm_ptw_ctx *ctx)
{
	ctx->level++;
	ctx->pgtable_shift = ctx->pte_shifts[ctx->level];
	ctx->invalid_entry = ctx->invalid_ptes[ctx->level];
}
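
/*
 * A minimal sketch of one directory-level walk step built on the helpers
 * above; the variable names (child, entry, next) are illustrative only, a
 * real walker would also recurse into non-leaf entries, and the in-tree
 * walker lives in arch/loongarch/kvm/mmu.c:
 *
 *	kvm_ptw_enter(ctx);
 *	do {
 *		next = kvm_pgtable_addr_end(ctx, addr, end);
 *		entry = kvm_pgtable_offset(ctx, child, addr);
 *		if (kvm_pte_present(ctx, entry))
 *			ctx->ops(entry, addr, ctx);
 *	} while (addr = next, addr < end);
 *	kvm_ptw_exit(ctx);
 */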

#endif /* __ASM_LOONGARCH_KVM_MMU_H__ */