// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 - Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */
#ifndef __ARM64_KVM_PKVM_H__
#define __ARM64_KVM_PKVM_H__

#include <linux/arm_ffa.h>
#include <linux/memblock.h>
#include <linux/scatterlist.h>
#include <asm/kvm_pgtable.h>

/* Maximum number of VMs that can co-exist under pKVM. */
#define KVM_MAX_PVMS 255

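/* Maximum number of memblock regions that can be recorded in hyp_memory[]. */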
#define HYP_MEMBLOCK_REGIONS 128

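/*
 * Host-side hooks for the EL2 ("hyp") instance of a VM: pkvm_init_host_vm()
 * sets up the host's pKVM state when the VM is created, and
 * pkvm_create_hyp_vm()/pkvm_destroy_hyp_vm() instantiate and tear down the
 * hypervisor-side counterpart.
 */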
int pkvm_init_host_vm(struct kvm *kvm);
int pkvm_create_hyp_vm(struct kvm *kvm);
void pkvm_destroy_hyp_vm(struct kvm *kvm);

extern struct memblock_region kvm_nvhe_sym(hyp_memory)[];
extern unsigned int kvm_nvhe_sym(hyp_memblock_nr);

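/*
 * Number of vmemmap bytes needed to cover one memblock region, with the
 * start rounded down and the end rounded up to a page boundary so the
 * result can be mapped with whole pages.
 */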
static inline unsigned long
hyp_vmemmap_memblock_size(struct memblock_region *reg, size_t vmemmap_entry_size)
{
	unsigned long nr_pages = reg->size >> PAGE_SHIFT;
	unsigned long start, end;

	start = (reg->base >> PAGE_SHIFT) * vmemmap_entry_size;
	end = start + nr_pages * vmemmap_entry_size;
	start = ALIGN_DOWN(start, PAGE_SIZE);
	end = ALIGN(end, PAGE_SIZE);

	return end - start;
}

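/* Total number of vmemmap pages needed to cover all hypervisor memory. */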
static inline unsigned long hyp_vmemmap_pages(size_t vmemmap_entry_size)
{
	unsigned long res = 0, i;

	for (i = 0; i < kvm_nvhe_sym(hyp_memblock_nr); i++) {
		res += hyp_vmemmap_memblock_size(&kvm_nvhe_sym(hyp_memory)[i],
						 vmemmap_entry_size);
	}

	return res >> PAGE_SHIFT;
}

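/* Pages backing the hypervisor's table of up to KVM_MAX_PVMS VM pointers. */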
static inline unsigned long hyp_vm_table_pages(void)
{
	return PAGE_ALIGN(KVM_MAX_PVMS * sizeof(void *)) >> PAGE_SHIFT;
}

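/*
 * Worst-case number of page-table pages needed to map @nr_pages at page
 * granularity: each lookup level holds PTRS_PER_PTE entries, so each step
 * divides the previous level's page count by PTRS_PER_PTE (rounding up).
 * For example, with a 4 KiB granule (PTRS_PER_PTE == 512) and four lookup
 * levels, mapping 1 GiB (262144 pages) costs 512 + 1 + 1 + 1 = 515 table
 * pages.
 */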
static inline unsigned long __hyp_pgtable_max_pages(unsigned long nr_pages)
{
	unsigned long total = 0;
	int i;

	/* Provision for the worst-case scenario */
	for (i = KVM_PGTABLE_FIRST_LEVEL; i <= KVM_PGTABLE_LAST_LEVEL; i++) {
		nr_pages = DIV_ROUND_UP(nr_pages, PTRS_PER_PTE);
		total += nr_pages;
	}

	return total;
}

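/* Worst-case page-table pages needed to map every byte of hyp memory. */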
static inline unsigned long __hyp_pgtable_total_pages(void)
{
	unsigned long res = 0, i;

	/* Cover all of memory at page granularity */
	for (i = 0; i < kvm_nvhe_sym(hyp_memblock_nr); i++) {
		struct memblock_region *reg = &kvm_nvhe_sym(hyp_memory)[i];
		res += __hyp_pgtable_max_pages(reg->size >> PAGE_SHIFT);
	}

	return res;
}

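/*
 * Pages to provision for the hypervisor's stage-1 page-table: enough to
 * map all of memory, plus headroom for 1 GiB of private mappings.
 */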
static inline unsigned long hyp_s1_pgtable_pages(void)
{
	unsigned long res;

	res = __hyp_pgtable_total_pages();

	/* Allow 1 GiB for private mappings */
	res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);

	return res;
}

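/*
 * Pages to provision for the host's stage-2 page-table. The architecture
 * allows up to 16 translation tables to be concatenated at the initial
 * stage-2 lookup level, hence the 16 extra pages below.
 */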
static inline unsigned long host_s2_pgtable_pages(void)
{
	unsigned long res;

	/*
	 * Include an extra 16 pages to safely upper-bound the worst case of
	 * concatenated pgds.
	 */
	res = __hyp_pgtable_total_pages() + 16;

	/* Allow 1 GiB for MMIO mappings */
	res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);

	return res;
}

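/* Number of pages in each of the hypervisor's FF-A RX/TX mailboxes. */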
#define KVM_FFA_MBOX_NR_PAGES	1

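/*
 * Pages needed by the hypervisor's FF-A proxy: buffer space for the largest
 * memory-region descriptor EL3 may return, plus the RX and TX mailboxes.
 */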
static inline unsigned long hyp_ffa_proxy_pages(void)
{
	size_t desc_max;

	/*
	 * The hypervisor FFA proxy needs enough memory to buffer a fragmented
	 * descriptor returned from EL3 in response to a RETRIEVE_REQ call.
	 */
	desc_max = sizeof(struct ffa_mem_region) +
		   sizeof(struct ffa_mem_region_attributes) +
		   sizeof(struct ffa_composite_mem_region) +
		   SG_MAX_SEGMENTS * sizeof(struct ffa_mem_region_addr_range);

	/* Plus a page each for the hypervisor's RX and TX mailboxes. */
	return (2 * KVM_FFA_MBOX_NR_PAGES) + DIV_ROUND_UP(desc_max, PAGE_SIZE);
}

#endif	/* __ARM64_KVM_PKVM_H__ */