// SPDX-License-Identifier: GPL-2.0
/*
 * AArch64 code
 *
 * Copyright (C) 2018, Red Hat, Inc.
 */

#include <linux/compiler.h>
#include <assert.h>

#include "guest_modes.h"
#include "kvm_util.h"
#include "processor.h"
#include <linux/bitfield.h>
#include <linux/sizes.h>

#define DEFAULT_ARM64_GUEST_STACK_VADDR_MIN	0xac0000

static vm_vaddr_t exception_handlers;

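/* Round v up to a vm->page_size boundary; already-aligned values are unchanged. */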
static uint64_t page_align(struct kvm_vm *vm, uint64_t v)
{
	return (v + vm->page_size - 1) & ~(vm->page_size - 1);
}

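/*
 * Index of the top-level (PGD) entry mapping gva. Each lower level resolves
 * page_shift - 3 bits, so the top level covers whatever remains of va_bits.
 */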
static uint64_t pgd_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
	unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;
	uint64_t mask = (1UL << (vm->va_bits - shift)) - 1;

	return (gva >> shift) & mask;
}

static uint64_t pud_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
	unsigned int shift = 2 * (vm->page_shift - 3) + vm->page_shift;
	uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;

	TEST_ASSERT(vm->pgtable_levels == 4,
		"Mode %d does not have 4 page table levels", vm->mode);

	return (gva >> shift) & mask;
}

static uint64_t pmd_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
	unsigned int shift = (vm->page_shift - 3) + vm->page_shift;
	uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;

	TEST_ASSERT(vm->pgtable_levels >= 3,
		"Mode %d does not have >= 3 page table levels", vm->mode);

	return (gva >> shift) & mask;
}

static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
	uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;
	return (gva >> vm->page_shift) & mask;
}

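/*
 * With FEAT_LPA2, the 4K and 16K granules use a different descriptor format
 * to reach 52-bit output addresses: PA bits [51:50] are carried in descriptor
 * bits [9:8], which otherwise hold the shareability field.
 */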
static inline bool use_lpa2_pte_format(struct kvm_vm *vm)
{
	return (vm->page_size == SZ_4K || vm->page_size == SZ_16K) &&
	    (vm->pa_bits > 48 || vm->va_bits > 48);
}

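/* Build a descriptor for pa with the given attribute bits, per the active PTE format. */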
static uint64_t addr_pte(struct kvm_vm *vm, uint64_t pa, uint64_t attrs)
{
	uint64_t pte;

	if (use_lpa2_pte_format(vm)) {
		pte = pa & GENMASK(49, vm->page_shift);
		pte |= FIELD_GET(GENMASK(51, 50), pa) << 8;
		attrs &= ~GENMASK(9, 8);
	} else {
		pte = pa & GENMASK(47, vm->page_shift);
		if (vm->page_shift == 16)
			pte |= FIELD_GET(GENMASK(51, 48), pa) << 12;
	}
	pte |= attrs;

	return pte;
}

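/* Inverse of addr_pte(): extract the output address from a descriptor. */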
static uint64_t pte_addr(struct kvm_vm *vm, uint64_t pte)
{
	uint64_t pa;

	if (use_lpa2_pte_format(vm)) {
		pa = pte & GENMASK(49, vm->page_shift);
		pa |= FIELD_GET(GENMASK(9, 8), pte) << 50;
	} else {
		pa = pte & GENMASK(47, vm->page_shift);
		if (vm->page_shift == 16)
			pa |= FIELD_GET(GENMASK(15, 12), pte) << 48;
	}

	return pa;
}

static uint64_t ptrs_per_pgd(struct kvm_vm *vm)
{
	unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;
	return 1 << (vm->va_bits - shift);
}

static uint64_t __maybe_unused ptrs_per_pte(struct kvm_vm *vm)
{
	return 1 << (vm->page_shift - 3);
}

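/* Allocate the (possibly multi-page) top-level page table for the VM. */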
void virt_arch_pgd_alloc(struct kvm_vm *vm)
{
	size_t nr_pages = page_align(vm, ptrs_per_pgd(vm) * 8) / vm->page_size;

	if (vm->pgd_created)
		return;

	vm->pgd = vm_phy_pages_alloc(vm, nr_pages,
				     KVM_GUEST_PAGE_TABLE_MIN_PADDR,
				     vm->memslots[MEM_REGION_PT]);
	vm->pgd_created = true;
}

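/*
 * Map a single page: walk down from the PGD, allocating intermediate tables
 * as needed (descriptor value 3 == valid table entry), then write the leaf
 * PTE. The low three bits of flags select the MAIR attribute index.
 */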
static void _virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
			 uint64_t flags)
{
	uint8_t attr_idx = flags & 7;
	uint64_t *ptep;

	TEST_ASSERT((vaddr % vm->page_size) == 0,
		"Virtual address not on page boundary,\n"
		"  vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size);
	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
		(vaddr >> vm->page_shift)),
		"Invalid virtual address, vaddr: 0x%lx", vaddr);
	TEST_ASSERT((paddr % vm->page_size) == 0,
		"Physical address not on page boundary,\n"
		"  paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size);
	TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
		"Physical address beyond maximum supported,\n"
		"  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
		paddr, vm->max_gfn, vm->page_size);

	ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, vaddr) * 8;
	if (!*ptep)
		*ptep = addr_pte(vm, vm_alloc_page_table(vm), 3);

	switch (vm->pgtable_levels) {
	case 4:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, vaddr) * 8;
		if (!*ptep)
			*ptep = addr_pte(vm, vm_alloc_page_table(vm), 3);
		/* fall through */
	case 3:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, vaddr) * 8;
		if (!*ptep)
			*ptep = addr_pte(vm, vm_alloc_page_table(vm), 3);
		/* fall through */
	case 2:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, vaddr) * 8;
		break;
	default:
		TEST_FAIL("Page table levels must be 2, 3, or 4");
	}

	*ptep = addr_pte(vm, paddr, (attr_idx << 2) | (1 << 10) | 3);  /* AF */
}

void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
{
	uint64_t attr_idx = MT_NORMAL;

	_virt_pg_map(vm, vaddr, paddr, attr_idx);
}

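/*
 * Return a host pointer to the leaf PTE mapping gva; fails the test if any
 * level of the walk is not present.
 */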
uint64_t *virt_get_pte_hva(struct kvm_vm *vm, vm_vaddr_t gva)
{
	uint64_t *ptep;

	if (!vm->pgd_created)
		goto unmapped_gva;

	ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, gva) * 8;
	if (!*ptep)
		goto unmapped_gva;

	switch (vm->pgtable_levels) {
	case 4:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, gva) * 8;
		if (!*ptep)
			goto unmapped_gva;
		/* fall through */
	case 3:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, gva) * 8;
		if (!*ptep)
			goto unmapped_gva;
		/* fall through */
	case 2:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, gva) * 8;
		if (!*ptep)
			goto unmapped_gva;
		break;
	default:
		TEST_FAIL("Page table levels must be 2, 3, or 4");
	}

	return ptep;

unmapped_gva:
	TEST_FAIL("No mapping for vm virtual address, gva: 0x%lx", gva);
	exit(EXIT_FAILURE);
}

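/* Translate a guest virtual address to a guest physical address via a table walk. */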
vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
	uint64_t *ptep = virt_get_pte_hva(vm, gva);

	return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1));
}

static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent, uint64_t page, int level)
{
#ifdef DEBUG
	static const char * const type[] = { "", "pud", "pmd", "pte" };
	uint64_t pte, *ptep;

	if (level == 4)
		return;

	for (pte = page; pte < page + ptrs_per_pte(vm) * 8; pte += 8) {
		ptep = addr_gpa2hva(vm, pte);
		if (!*ptep)
			continue;
		fprintf(stream, "%*s%s: %lx: %lx at %p\n", indent, "", type[level], pte, *ptep, ptep);
		pte_dump(stream, vm, indent + 1, pte_addr(vm, *ptep), level + 1);
	}
#endif
}

void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
	int level = 4 - (vm->pgtable_levels - 1);
	uint64_t pgd, *ptep;

	if (!vm->pgd_created)
		return;

	for (pgd = vm->pgd; pgd < vm->pgd + ptrs_per_pgd(vm) * 8; pgd += 8) {
		ptep = addr_gpa2hva(vm, pgd);
		if (!*ptep)
			continue;
		fprintf(stream, "%*spgd: %lx: %lx at %p\n", indent, "", pgd, *ptep, ptep);
		pte_dump(stream, vm, indent + 1, pte_addr(vm, *ptep), level);
	}
}

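/*
 * Initialize the vCPU (KVM_ARM_VCPU_INIT) and program the EL1 state needed
 * to run guest code with the MMU on: granule and output size in TCR_EL1,
 * WBWA inner-shareable translation, TTBR0_EL1 pointing at the VM's page
 * tables, and the vCPU id stashed in TPIDR_EL1 for guest_get_vcpuid().
 */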
void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init)
{
	struct kvm_vcpu_init default_init = { .target = -1, };
	struct kvm_vm *vm = vcpu->vm;
	uint64_t sctlr_el1, tcr_el1, ttbr0_el1;

	if (!init)
		init = &default_init;

	if (init->target == -1) {
		struct kvm_vcpu_init preferred;
		vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &preferred);
		init->target = preferred.target;
	}

	vcpu_ioctl(vcpu, KVM_ARM_VCPU_INIT, init);

	/*
	 * Enable FP/ASIMD to avoid trapping when accessing Q0-Q15
	 * registers, which the variable argument list macros do.
	 */
	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CPACR_EL1), 3 << 20);

	vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), &sctlr_el1);
	vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TCR_EL1), &tcr_el1);

	/* Configure base granule size */
	switch (vm->mode) {
	case VM_MODE_PXXV48_4K:
		TEST_FAIL("AArch64 does not support 4K sized pages "
			  "with ANY-bit physical address ranges");
	case VM_MODE_P52V48_64K:
	case VM_MODE_P48V48_64K:
	case VM_MODE_P40V48_64K:
	case VM_MODE_P36V48_64K:
		tcr_el1 |= 1ul << 14; /* TG0 = 64KB */
		break;
	case VM_MODE_P52V48_16K:
	case VM_MODE_P48V48_16K:
	case VM_MODE_P40V48_16K:
	case VM_MODE_P36V48_16K:
	case VM_MODE_P36V47_16K:
		tcr_el1 |= 2ul << 14; /* TG0 = 16KB */
		break;
	case VM_MODE_P52V48_4K:
	case VM_MODE_P48V48_4K:
	case VM_MODE_P40V48_4K:
	case VM_MODE_P36V48_4K:
		tcr_el1 |= 0ul << 14; /* TG0 = 4KB */
		break;
	default:
		TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
	}

	ttbr0_el1 = vm->pgd & GENMASK(47, vm->page_shift);

	/* Configure output size */
	switch (vm->mode) {
	case VM_MODE_P52V48_4K:
	case VM_MODE_P52V48_16K:
	case VM_MODE_P52V48_64K:
		tcr_el1 |= 6ul << 32; /* IPS = 52 bits */
		ttbr0_el1 |= FIELD_GET(GENMASK(51, 48), vm->pgd) << 2;
		break;
	case VM_MODE_P48V48_4K:
	case VM_MODE_P48V48_16K:
	case VM_MODE_P48V48_64K:
		tcr_el1 |= 5ul << 32; /* IPS = 48 bits */
		break;
	case VM_MODE_P40V48_4K:
	case VM_MODE_P40V48_16K:
	case VM_MODE_P40V48_64K:
		tcr_el1 |= 2ul << 32; /* IPS = 40 bits */
		break;
	case VM_MODE_P36V48_4K:
	case VM_MODE_P36V48_16K:
	case VM_MODE_P36V48_64K:
	case VM_MODE_P36V47_16K:
		tcr_el1 |= 1ul << 32; /* IPS = 36 bits */
		break;
	default:
		TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
	}

	sctlr_el1 |= (1 << 0) | (1 << 2) | (1 << 12) /* M | C | I */;
	/* TCR_EL1 |= IRGN0:WBWA | ORGN0:WBWA | SH0:Inner-Shareable */;
	tcr_el1 |= (1 << 8) | (1 << 10) | (3 << 12);
	tcr_el1 |= (64 - vm->va_bits) /* T0SZ */;
	if (use_lpa2_pte_format(vm))
		tcr_el1 |= (1ul << 59) /* DS */;

	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), sctlr_el1);
	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TCR_EL1), tcr_el1);
	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_MAIR_EL1), DEFAULT_MAIR_EL1);
	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TTBR0_EL1), ttbr0_el1);
	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TPIDR_EL1), vcpu->id);
}

void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
{
	uint64_t pstate, pc;

	vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pstate), &pstate);
	vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc), &pc);

	fprintf(stream, "%*spstate: 0x%.16lx pc: 0x%.16lx\n",
		indent, "", pstate, pc);
}

void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code)
{
	vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code);
}

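/*
 * Add a vCPU and give it a guest stack; SP_EL1 is set to the top of the
 * allocation since the stack grows down.
 */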
static struct kvm_vcpu *__aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
					   struct kvm_vcpu_init *init)
{
	size_t stack_size;
	uint64_t stack_vaddr;
	struct kvm_vcpu *vcpu = __vm_vcpu_add(vm, vcpu_id);

	stack_size = vm->page_size == 4096 ? DEFAULT_STACK_PGS * vm->page_size :
					     vm->page_size;
	stack_vaddr = __vm_vaddr_alloc(vm, stack_size,
				       DEFAULT_ARM64_GUEST_STACK_VADDR_MIN,
				       MEM_REGION_DATA);

	aarch64_vcpu_setup(vcpu, init);

	vcpu_set_reg(vcpu, ARM64_CORE_REG(sp_el1), stack_vaddr + stack_size);
	return vcpu;
}

struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
				  struct kvm_vcpu_init *init, void *guest_code)
{
	struct kvm_vcpu *vcpu = __aarch64_vcpu_add(vm, vcpu_id, init);

	vcpu_arch_set_entry_point(vcpu, guest_code);

	return vcpu;
}

struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
{
	return __aarch64_vcpu_add(vm, vcpu_id, NULL);
}

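/* Pass up to eight arguments to guest code in x0-x7, per the AAPCS64. */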
void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
{
	va_list ap;
	int i;

	TEST_ASSERT(num >= 1 && num <= 8, "Unsupported number of args,\n"
		    "  num: %u", num);

	va_start(ap, num);

	for (i = 0; i < num; i++) {
		vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.regs[i]),
			     va_arg(ap, uint64_t));
	}

	va_end(ap);
}

void kvm_exit_unexpected_exception(int vector, uint64_t ec, bool valid_ec)
{
	ucall(UCALL_UNHANDLED, 3, vector, ec, valid_ec);
	while (1)
		;
}

void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	if (get_ucall(vcpu, &uc) != UCALL_UNHANDLED)
		return;

	if (uc.args[2]) /* valid_ec */ {
		assert(VECTOR_IS_SYNC(uc.args[0]));
		TEST_FAIL("Unexpected exception (vector:0x%lx, ec:0x%lx)",
			  uc.args[0], uc.args[1]);
	} else {
		assert(!VECTOR_IS_SYNC(uc.args[0]));
		TEST_FAIL("Unexpected exception (vector:0x%lx)",
			  uc.args[0]);
	}
}

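/*
 * Per-VM table of guest exception handlers, indexed by vector and, for
 * synchronous exceptions, by the ESR_EL1 exception class.
 */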
struct handlers {
	handler_fn exception_handlers[VECTOR_NUM][ESR_EC_NUM];
};

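/* Point VBAR_EL1 at the library's vector table (the "vectors" asm symbol). */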
void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu)
{
	extern char vectors;

	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_VBAR_EL1), (uint64_t)&vectors);
}

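/*
 * Common entry point from the vector table stubs: dispatch to the registered
 * handler for this vector (and EC, for synchronous exceptions), or report an
 * unhandled exception to the host via ucall.
 */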
void route_exception(struct ex_regs *regs, int vector)
{
	struct handlers *handlers = (struct handlers *)exception_handlers;
	bool valid_ec;
	int ec = 0;

	switch (vector) {
	case VECTOR_SYNC_CURRENT:
	case VECTOR_SYNC_LOWER_64:
		ec = (read_sysreg(esr_el1) >> ESR_EC_SHIFT) & ESR_EC_MASK;
		valid_ec = true;
		break;
	case VECTOR_IRQ_CURRENT:
	case VECTOR_IRQ_LOWER_64:
	case VECTOR_FIQ_CURRENT:
	case VECTOR_FIQ_LOWER_64:
	case VECTOR_ERROR_CURRENT:
	case VECTOR_ERROR_LOWER_64:
		ec = 0;
		valid_ec = false;
		break;
	default:
		valid_ec = false;
		goto unexpected_exception;
	}

	if (handlers && handlers->exception_handlers[vector][ec])
		return handlers->exception_handlers[vector][ec](regs);

unexpected_exception:
	kvm_exit_unexpected_exception(vector, ec, valid_ec);
}

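/*
 * Allocate the handler table in guest memory and publish its GVA through the
 * guest-visible exception_handlers pointer.
 */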
void vm_init_descriptor_tables(struct kvm_vm *vm)
{
	vm->handlers = __vm_vaddr_alloc(vm, sizeof(struct handlers),
					vm->page_size, MEM_REGION_DATA);

	*(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers;
}

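/*
 * Register a handler for one synchronous exception class. Illustrative use
 * (the handler body and choice of EC here are hypothetical):
 *
 *	static void dabt_handler(struct ex_regs *regs)
 *	{
 *		GUEST_ASSERT(...);	// or fix things up and return
 *	}
 *
 *	vm_init_descriptor_tables(vm);
 *	vcpu_init_descriptor_tables(vcpu);
 *	vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT, ESR_EC_DABT,
 *				dabt_handler);
 */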
void vm_install_sync_handler(struct kvm_vm *vm, int vector, int ec,
			 void (*handler)(struct ex_regs *))
{
	struct handlers *handlers = addr_gva2hva(vm, vm->handlers);

	assert(VECTOR_IS_SYNC(vector));
	assert(vector < VECTOR_NUM);
	assert(ec < ESR_EC_NUM);
	handlers->exception_handlers[vector][ec] = handler;
}

void vm_install_exception_handler(struct kvm_vm *vm, int vector,
			 void (*handler)(struct ex_regs *))
{
	struct handlers *handlers = addr_gva2hva(vm, vm->handlers);

	assert(!VECTOR_IS_SYNC(vector));
	assert(vector < VECTOR_NUM);
	handlers->exception_handlers[vector][0] = handler;
}

uint32_t guest_get_vcpuid(void)
{
	return read_sysreg(tpidr_el1);
}

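/*
 * Clamp the VM's IPA size for one granule based on its ID register TGRAN
 * field: 0 if unsupported, 52 if both the granule and the VM support 52-bit
 * addresses, else at most 48.
 */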
static uint32_t max_ipa_for_page_size(uint32_t vm_ipa, uint32_t gran,
				uint32_t not_sup_val, uint32_t ipa52_min_val)
{
	if (gran == not_sup_val)
		return 0;
	else if (gran >= ipa52_min_val && vm_ipa >= 52)
		return 52;
	else
		return min(vm_ipa, 48U);
}

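/*
 * Probe ID_AA64MMFR0_EL1 on a throwaway VM and vCPU to determine the maximum
 * IPA size usable with each of the 4K, 16K and 64K granules.
 */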
void aarch64_get_supported_page_sizes(uint32_t ipa, uint32_t *ipa4k,
					uint32_t *ipa16k, uint32_t *ipa64k)
{
	struct kvm_vcpu_init preferred_init;
	int kvm_fd, vm_fd, vcpu_fd, err;
	uint64_t val;
	uint32_t gran;
	struct kvm_one_reg reg = {
		.id	= KVM_ARM64_SYS_REG(SYS_ID_AA64MMFR0_EL1),
		.addr	= (uint64_t)&val,
	};

	kvm_fd = open_kvm_dev_path_or_exit();
	vm_fd = __kvm_ioctl(kvm_fd, KVM_CREATE_VM, (void *)(unsigned long)ipa);
	TEST_ASSERT(vm_fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VM, vm_fd));

	vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
	TEST_ASSERT(vcpu_fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VCPU, vcpu_fd));

	err = ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &preferred_init);
	TEST_ASSERT(err == 0, KVM_IOCTL_ERROR(KVM_ARM_PREFERRED_TARGET, err));
	err = ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &preferred_init);
	TEST_ASSERT(err == 0, KVM_IOCTL_ERROR(KVM_ARM_VCPU_INIT, err));

	err = ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
	TEST_ASSERT(err == 0, KVM_IOCTL_ERROR(KVM_GET_ONE_REG, err));

	gran = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_TGRAN4), val);
	*ipa4k = max_ipa_for_page_size(ipa, gran, ID_AA64MMFR0_EL1_TGRAN4_NI,
					ID_AA64MMFR0_EL1_TGRAN4_52_BIT);

	gran = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_TGRAN64), val);
	*ipa64k = max_ipa_for_page_size(ipa, gran, ID_AA64MMFR0_EL1_TGRAN64_NI,
					ID_AA64MMFR0_EL1_TGRAN64_IMP);

	gran = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_TGRAN16), val);
	*ipa16k = max_ipa_for_page_size(ipa, gran, ID_AA64MMFR0_EL1_TGRAN16_NI,
					ID_AA64MMFR0_EL1_TGRAN16_52_BIT);

	close(vcpu_fd);
	close(vm_fd);
	close(kvm_fd);
}

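/*
 * Marshal an SMCCC call: function id in w0, arguments in x1-x7, issue the
 * conduit instruction (hvc or smc), and collect x0-x3 into res. A guest
 * could, for instance, query the PSCI version (sketch; assumes the
 * PSCI_0_2_FN_PSCI_VERSION define from linux/psci.h):
 *
 *	struct arm_smccc_res res;
 *
 *	smccc_hvc(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0, 0, 0, 0, 0, &res);
 *	GUEST_ASSERT(res.a0 >= PSCI_VERSION(0, 2));
 */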
#define __smccc_call(insn, function_id, arg0, arg1, arg2, arg3, arg4, arg5,	\
		     arg6, res)							\
	asm volatile("mov   w0, %w[function_id]\n"				\
		     "mov   x1, %[arg0]\n"					\
		     "mov   x2, %[arg1]\n"					\
		     "mov   x3, %[arg2]\n"					\
		     "mov   x4, %[arg3]\n"					\
		     "mov   x5, %[arg4]\n"					\
		     "mov   x6, %[arg5]\n"					\
		     "mov   x7, %[arg6]\n"					\
		     #insn  "#0\n"						\
		     "mov   %[res0], x0\n"					\
		     "mov   %[res1], x1\n"					\
		     "mov   %[res2], x2\n"					\
		     "mov   %[res3], x3\n"					\
		     : [res0] "=r"(res->a0), [res1] "=r"(res->a1),		\
		       [res2] "=r"(res->a2), [res3] "=r"(res->a3)		\
		     : [function_id] "r"(function_id), [arg0] "r"(arg0),	\
		       [arg1] "r"(arg1), [arg2] "r"(arg2), [arg3] "r"(arg3),	\
		       [arg4] "r"(arg4), [arg5] "r"(arg5), [arg6] "r"(arg6)	\
		     : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7")

void smccc_hvc(uint32_t function_id, uint64_t arg0, uint64_t arg1,
	       uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5,
	       uint64_t arg6, struct arm_smccc_res *res)
{
	__smccc_call(hvc, function_id, arg0, arg1, arg2, arg3, arg4, arg5,
		     arg6, res);
}

void smccc_smc(uint32_t function_id, uint64_t arg0, uint64_t arg1,
	       uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5,
	       uint64_t arg6, struct arm_smccc_res *res)
{
	__smccc_call(smc, function_id, arg0, arg1, arg2, arg3, arg4, arg5,
		     arg6, res);
}

void kvm_selftest_arch_init(void)
{
	/*
	 * arm64 doesn't have a true default mode, so start by computing the
	 * available IPA space and page sizes early.
	 */
	guest_modes_append_default();
}

void vm_vaddr_populate_bitmap(struct kvm_vm *vm)
{
	/*
	 * arm64 selftests use only TTBR0_EL1, meaning that the valid VA space
	 * is [0, 2^(64 - TCR_EL1.T0SZ)).
	 */
	sparsebit_set_num(vm->vpages_valid, 0,
			  (1ULL << vm->va_bits) >> vm->page_shift);
}