/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2021 Google LLC
 * Author: Fuad Tabba <tabba@google.com>
 */

#ifndef __ARM64_KVM_FIXED_CONFIG_H__
#define __ARM64_KVM_FIXED_CONFIG_H__

#include <asm/sysreg.h>

/*
 * This file contains definitions for features to be allowed or restricted for
 * guest virtual machines, depending on the mode KVM is running in and on the
 * type of guest that is running.
 *
 * The ALLOW masks are bitmasks of feature fields that are allowed without any
 * restrictions, as long as the features are supported by the system.
 *
 * The RESTRICT_UNSIGNED masks, if present, represent unsigned fields of
 * features that are restricted to support at most the specified value.
 *
 * If a feature field is not present in either, then it is not supported.
 *
 * The approach taken for protected VMs is to allow features that:
 * - Are needed by common Linux distributions (e.g., floating point)
 * - Are trivial to support, e.g., supporting the feature does not introduce
 *   or require tracking of additional state in KVM
 * - Cannot be trapped, or whose use by the guest cannot be prevented anyway
 */

/*
 * Allow for protected VMs:
 * - Floating-point and Advanced SIMD
 * - Data Independent Timing
 * - Spectre/Meltdown Mitigation
 */
#define PVM_ID_AA64PFR0_ALLOW (\
	ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_FP) | \
	ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AdvSIMD) | \
	ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_DIT) | \
	ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) | \
	ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3) \
	)
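
/*
 * Illustrative sketch only, not the in-tree implementation: an ALLOW mask
 * would typically be applied by keeping only the allowed feature fields of
 * the hardware register value, so that every other field reads as 0 (not
 * implemented) for a protected guest. The helper name below is hypothetical;
 * fields covered by PVM_ID_AA64PFR0_RESTRICT_UNSIGNED would be handled
 * separately (see the sketch after that mask).
 */
static inline u64 pvm_example_apply_pfr0_allow(u64 hw_val)
{
	/* Keep only the feature fields allowed without restriction. */
	return hw_val & PVM_ID_AA64PFR0_ALLOW;
}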

/*
 * Restrict to the following *unsigned* features for protected VMs:
 * - AArch64 guests only (no support for AArch32 guests):
 *	AArch32 adds complexity in trap handling, emulation, condition codes,
 *	etc...
 * - RAS (v1)
 *	Supported by KVM
 */
#define PVM_ID_AA64PFR0_RESTRICT_UNSIGNED (\
	FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), ID_AA64PFR0_EL1_ELx_64BIT_ONLY) | \
	FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1), ID_AA64PFR0_EL1_ELx_64BIT_ONLY) | \
	FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL2), ID_AA64PFR0_EL1_ELx_64BIT_ONLY) | \
	FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL3), ID_AA64PFR0_EL1_ELx_64BIT_ONLY) | \
	FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_RAS), ID_AA64PFR0_EL1_RAS_IMP) \
	)
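
/*
 * Illustrative sketch only, assuming the usual reading of a RESTRICT_UNSIGNED
 * mask: each listed unsigned field is exposed as at most the value encoded in
 * the mask, i.e. the minimum of the hardware value and the cap. The helper
 * name is hypothetical; FIELD_GET is the <linux/bitfield.h> counterpart of
 * the FIELD_PREP macro used above.
 */
static inline u64 pvm_example_cap_pfr0_ras(u64 hw_val)
{
	u64 hw = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_RAS), hw_val);
	u64 cap = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_RAS),
			    PVM_ID_AA64PFR0_RESTRICT_UNSIGNED);

	/* Expose the smaller of the hardware RAS level and the allowed cap. */
	return FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_RAS),
			  hw < cap ? hw : cap);
}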

/*
 * Allow for protected VMs:
 * - Branch Target Identification
 * - Speculative Store Bypass Safe (SSBS)
 */
#define PVM_ID_AA64PFR1_ALLOW (\
	ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_BT) | \
	ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SSBS) \
	)

#define PVM_ID_AA64PFR2_ALLOW (0ULL)

/*
 * Allow for protected VMs:
 * - Mixed-endian
 * - Distinction between Secure and Non-secure Memory
 * - Mixed-endian at EL0 only
 * - Non-context synchronizing exception entry and exit
 */
#define PVM_ID_AA64MMFR0_ALLOW (\
	ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_BIGEND) | \
	ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_SNSMEM) | \
	ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_BIGENDEL0) | \
	ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_EXS) \
	)

/*
 * Restrict to the following *unsigned* features for protected VMs:
 * - 40-bit IPA
 * - 16-bit ASID
 */
#define PVM_ID_AA64MMFR0_RESTRICT_UNSIGNED (\
	FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_PARANGE), ID_AA64MMFR0_EL1_PARANGE_40) | \
	FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_ASIDBITS), ID_AA64MMFR0_EL1_ASIDBITS_16) \
	)

/*
 * Allow for protected VMs:
 * - Hardware translation table updates to Access flag and Dirty state
 * - Number of VMID bits from CPU
 * - Hierarchical Permission Disables
 * - Privileged Access Never
 * - SError interrupt exceptions from speculative reads
 * - Enhanced Translation Synchronization
 * - Control for cache maintenance permission
 */
#define PVM_ID_AA64MMFR1_ALLOW (\
	ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_HAFDBS) | \
	ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_VMIDBits) | \
	ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_HPDS) | \
	ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_PAN) | \
	ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_SpecSEI) | \
	ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_ETS) | \
	ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_CMOW) \
	)

/*
 * Allow for protected VMs:
 * - Common not Private translations
 * - User Access Override
 * - IESB bit in the SCTLR_ELx registers
 * - Unaligned single-copy atomicity and atomic functions
 * - ESR_ELx.EC value on an exception by read access to feature ID space
 * - TTL field in address operations
 * - Break-before-make sequences when changing translation block size
 * - E0PDx mechanism
 */
#define PVM_ID_AA64MMFR2_ALLOW (\
	ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_CnP) | \
	ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_UAO) | \
	ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_IESB) | \
	ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_AT) | \
	ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_IDS) | \
	ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_TTL) | \
	ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_BBM) | \
	ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_E0PD) \
	)

#define PVM_ID_AA64MMFR3_ALLOW (0ULL)

/*
 * No support for Scalable Vectors for protected VMs:
 *	Requires additional support from KVM, e.g., context-switching and
 *	trapping at EL2
 */
#define PVM_ID_AA64ZFR0_ALLOW (0ULL)

/*
 * No support for debug, including breakpoints and watchpoints, for protected
 * VMs:
 *	The Arm architecture mandates support for at least the Armv8 debug
 *	architecture, which would include at least 2 hardware breakpoints and
 *	watchpoints. Providing that support to protected guests adds
 *	considerable state and complexity. Therefore, the reserved value of 0 is
 *	used for debug-related fields.
 */
#define PVM_ID_AA64DFR0_ALLOW (0ULL)
#define PVM_ID_AA64DFR1_ALLOW (0ULL)

/*
 * No support for implementation defined features.
 */
#define PVM_ID_AA64AFR0_ALLOW (0ULL)
#define PVM_ID_AA64AFR1_ALLOW (0ULL)

/*
 * No restrictions on instructions implemented in AArch64.
 */
#define PVM_ID_AA64ISAR0_ALLOW (\
	ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_AES) | \
	ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_SHA1) | \
	ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_SHA2) | \
	ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_CRC32) | \
	ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_ATOMIC) | \
	ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_RDM) | \
	ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_SHA3) | \
	ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_SM3) | \
	ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_SM4) | \
	ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_DP) | \
	ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_FHM) | \
	ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_TS) | \
	ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_TLB) | \
	ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_RNDR) \
	)

/* Restrict pointer authentication to the basic version. */
#define PVM_ID_AA64ISAR1_RESTRICT_UNSIGNED (\
	FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA), ID_AA64ISAR1_EL1_APA_PAuth) | \
	FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API), ID_AA64ISAR1_EL1_API_PAuth) \
	)

#define PVM_ID_AA64ISAR2_RESTRICT_UNSIGNED (\
	FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3), ID_AA64ISAR2_EL1_APA3_PAuth) \
	)

#define PVM_ID_AA64ISAR1_ALLOW (\
	ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_DPB) | \
	ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_JSCVT) | \
	ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_FCMA) | \
	ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_LRCPC) | \
	ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPA) | \
	ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI) | \
	ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_FRINTTS) | \
	ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_SB) | \
	ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_SPECRES) | \
	ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_BF16) | \
	ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_DGH) | \
	ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_I8MM) \
	)

#define PVM_ID_AA64ISAR2_ALLOW (\
	ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_ATS1A) | \
	ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3) | \
	ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_MOPS) \
	)

u64 pvm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id);
bool kvm_handle_pvm_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code);
bool kvm_handle_pvm_restricted(struct kvm_vcpu *vcpu, u64 *exit_code);
int kvm_check_pvm_sysreg_table(void);

#endif /* __ARM64_KVM_FIXED_CONFIG_H__ */