1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * vpmu_counter_access - Test vPMU event counter access
4 *
5 * Copyright (c) 2023 Google LLC.
6 *
7 * This test checks if the guest can see the same number of the PMU event
8 * counters (PMCR_EL0.N) that userspace sets, if the guest can access
9 * those counters, and if the guest is prevented from accessing any
10 * other counters.
 * It also checks if the userspace accesses to the PMU registers honor the
12 * PMCR.N value that's set for the guest.
13 * This test runs only when KVM_CAP_ARM_PMU_V3 is supported on the host.
14 */
15#include <kvm_util.h>
16#include <processor.h>
17#include <test_util.h>
18#include <vgic.h>
19#include <perf/arm_pmuv3.h>
20#include <linux/bitfield.h>
21
22/* The max number of the PMU event counters (excluding the cycle counter) */
23#define ARMV8_PMU_MAX_GENERAL_COUNTERS	(ARMV8_PMU_MAX_COUNTERS - 1)
24
25/* The cycle counter bit position that's common among the PMU registers */
26#define ARMV8_PMU_CYCLE_IDX		31
27
/* Bundles the VM, its single vCPU, and the vGIC fd created for each test run */
struct vpmu_vm {
	struct kvm_vm *vm;
	struct kvm_vcpu *vcpu;
	int gic_fd;	/* fd from vgic_v3_setup(); closed in destroy_vpmu_vm() */
};
33
34static struct vpmu_vm vpmu_vm;
35
/*
 * A SET/CLR pair of PMU bitmap register IDs (e.g. PMCNTENSET_EL0 and
 * PMCNTENCLR_EL0), used by the userspace-access validity tests.
 */
struct pmreg_sets {
	uint64_t set_reg_id;
	uint64_t clr_reg_id;
};

/* Convenience initializer for a struct pmreg_sets entry */
#define PMREG_SET(set, clr) {.set_reg_id = set, .clr_reg_id = clr}
42
43static uint64_t get_pmcr_n(uint64_t pmcr)
44{
45	return FIELD_GET(ARMV8_PMU_PMCR_N, pmcr);
46}
47
48static void set_pmcr_n(uint64_t *pmcr, uint64_t pmcr_n)
49{
50	u64p_replace_bits((__u64 *) pmcr, pmcr_n, ARMV8_PMU_PMCR_N);
51}
52
53static uint64_t get_counters_mask(uint64_t n)
54{
55	uint64_t mask = BIT(ARMV8_PMU_CYCLE_IDX);
56
57	if (n)
58		mask |= GENMASK(n - 1, 0);
59	return mask;
60}
61
/* Read PMEVCNTR<n>_EL0 through PMXEVCNTR_EL0 (indirect, PMSELR-based access) */
static inline unsigned long read_sel_evcntr(int sel)
{
	/* Select counter @sel; the isb() orders the select before the access */
	write_sysreg(sel, pmselr_el0);
	isb();
	return read_sysreg(pmxevcntr_el0);
}
69
/* Write PMEVCNTR<n>_EL0 through PMXEVCNTR_EL0 (indirect, PMSELR-based access) */
static inline void write_sel_evcntr(int sel, unsigned long val)
{
	/* Select counter @sel; the isb() orders the select before the access */
	write_sysreg(sel, pmselr_el0);
	isb();
	write_sysreg(val, pmxevcntr_el0);
	isb();
}
78
/* Read PMEVTYPER<n>_EL0 through PMXEVTYPER_EL0 (indirect, PMSELR-based access) */
static inline unsigned long read_sel_evtyper(int sel)
{
	/* Select counter @sel; the isb() orders the select before the access */
	write_sysreg(sel, pmselr_el0);
	isb();
	return read_sysreg(pmxevtyper_el0);
}
86
/* Write PMEVTYPER<n>_EL0 through PMXEVTYPER_EL0 (indirect, PMSELR-based access) */
static inline void write_sel_evtyper(int sel, unsigned long val)
{
	/* Select counter @sel; the isb() orders the select before the access */
	write_sysreg(sel, pmselr_el0);
	isb();
	write_sysreg(val, pmxevtyper_el0);
	isb();
}
95
/* Disable the PMU (clear PMCR_EL0.E) and reset all event counters to zero */
static void pmu_disable_reset(void)
{
	uint64_t pmcr = read_sysreg(pmcr_el0);

	/* Reset all counters, disabling them */
	pmcr &= ~ARMV8_PMU_PMCR_E;	/* E: global counter enable */
	write_sysreg(pmcr | ARMV8_PMU_PMCR_P, pmcr_el0);	/* P: event counter reset */
	isb();
}
105
/* Read PMEVCNTR<n>_EL0 with a direct (non-PMSELR) sysreg access */
#define RETURN_READ_PMEVCNTRN(n) \
	return read_sysreg(pmevcntr##n##_el0)
static unsigned long read_pmevcntrn(int n)
{
	/* PMEVN_SWITCH dispatches on the runtime @n to the matching sysreg */
	PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
	return 0;
}
113
/* Write PMEVCNTR<n>_EL0 with a direct (non-PMSELR) sysreg access */
#define WRITE_PMEVCNTRN(n) \
	write_sysreg(val, pmevcntr##n##_el0)
static void write_pmevcntrn(int n, unsigned long val)
{
	/* PMEVN_SWITCH dispatches on the runtime @n to the matching sysreg */
	PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
	isb();
}
121
/* Read PMEVTYPER<n>_EL0 with a direct (non-PMSELR) sysreg access */
#define READ_PMEVTYPERN(n) \
	return read_sysreg(pmevtyper##n##_el0)
static unsigned long read_pmevtypern(int n)
{
	/* PMEVN_SWITCH dispatches on the runtime @n to the matching sysreg */
	PMEVN_SWITCH(n, READ_PMEVTYPERN);
	return 0;
}
129
/* Write PMEVTYPER<n>_EL0 with a direct (non-PMSELR) sysreg access */
#define WRITE_PMEVTYPERN(n) \
	write_sysreg(val, pmevtyper##n##_el0)
static void write_pmevtypern(int n, unsigned long val)
{
	/* PMEVN_SWITCH dispatches on the runtime @n to the matching sysreg */
	PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
	isb();
}
137
/*
 * The pmc_accessor structure has pointers to PMEV{CNTR,TYPER}<n>_EL0
 * accessors that test cases will use. Each of the accessors will
 * either directly read/write PMEV{CNTR,TYPER}<n>_EL0
 * (i.e. {read,write}_pmev{cnt,type}rn()), or read/write them through
 * PMXEV{CNTR,TYPER}_EL0 (i.e. {read,write}_sel_ev{cnt,type}r()).
 *
 * This is used to test that combinations of those accessors provide
 * the consistent behavior.
 */
struct pmc_accessor {
	/* A function to be used to read PMEVCNTR<n>_EL0 */
	unsigned long	(*read_cntr)(int idx);
	/* A function to be used to write PMEVCNTR<n>_EL0 */
	void		(*write_cntr)(int idx, unsigned long val);
	/* A function to be used to read PMEVTYPER<n>_EL0 */
	unsigned long	(*read_typer)(int idx);
	/* A function to be used to write PMEVTYPER<n>_EL0 */
	void		(*write_typer)(int idx, unsigned long val);
};
158
/* All four read/write combinations of the direct and indirect accessors */
struct pmc_accessor pmc_accessors[] = {
	/* test with all direct accesses */
	{ read_pmevcntrn, write_pmevcntrn, read_pmevtypern, write_pmevtypern },
	/* test with all indirect accesses */
	{ read_sel_evcntr, write_sel_evcntr, read_sel_evtyper, write_sel_evtyper },
	/* read with direct accesses, and write with indirect accesses */
	{ read_pmevcntrn, write_sel_evcntr, read_pmevtypern, write_sel_evtyper },
	/* read with indirect accesses, and write with direct accesses */
	{ read_sel_evcntr, write_pmevcntrn, read_sel_evtyper, write_pmevtypern },
};
169
/*
 * Convert a pointer of pmc_accessor to an index in pmc_accessors[],
 * assuming that the pointer is one of the entries in pmc_accessors[].
 */
#define PMC_ACC_TO_IDX(acc)	(acc - &pmc_accessors[0])

/*
 * Assert that at least one of the @mask bits in the bitmap register
 * @regname is set (when @set_expected is true) or that none of them
 * are set (when @set_expected is false).
 */
#define GUEST_ASSERT_BITMAP_REG(regname, mask, set_expected)			 \
{										 \
	uint64_t _tval = read_sysreg(regname);					 \
										 \
	if (set_expected)							 \
		__GUEST_ASSERT((_tval & mask),					 \
				"tval: 0x%lx; mask: 0x%lx; set_expected: %u",	 \
				_tval, mask, set_expected);			 \
	else									 \
		__GUEST_ASSERT(!(_tval & mask),					 \
				"tval: 0x%lx; mask: 0x%lx; set_expected: %u",	 \
				_tval, mask, set_expected);			 \
}
189
/*
 * Check if @mask bits in {PMCNTEN,PMINTEN,PMOVS}{SET,CLR} registers
 * are set or cleared as specified in @set_expected.
 * Both the SET and CLR variants are checked: reads of either variant
 * return the same underlying bitmap.
 */
static void check_bitmap_pmu_regs(uint64_t mask, bool set_expected)
{
	GUEST_ASSERT_BITMAP_REG(pmcntenset_el0, mask, set_expected);
	GUEST_ASSERT_BITMAP_REG(pmcntenclr_el0, mask, set_expected);
	GUEST_ASSERT_BITMAP_REG(pmintenset_el1, mask, set_expected);
	GUEST_ASSERT_BITMAP_REG(pmintenclr_el1, mask, set_expected);
	GUEST_ASSERT_BITMAP_REG(pmovsset_el0, mask, set_expected);
	GUEST_ASSERT_BITMAP_REG(pmovsclr_el0, mask, set_expected);
}
203
204/*
205 * Check if the bit in {PMCNTEN,PMINTEN,PMOVS}{SET,CLR} registers corresponding
206 * to the specified counter (@pmc_idx) can be read/written as expected.
207 * When @set_op is true, it tries to set the bit for the counter in
208 * those registers by writing the SET registers (the bit won't be set
209 * if the counter is not implemented though).
210 * Otherwise, it tries to clear the bits in the registers by writing
211 * the CLR registers.
212 * Then, it checks if the values indicated in the registers are as expected.
213 */
214static void test_bitmap_pmu_regs(int pmc_idx, bool set_op)
215{
216	uint64_t pmcr_n, test_bit = BIT(pmc_idx);
217	bool set_expected = false;
218
219	if (set_op) {
220		write_sysreg(test_bit, pmcntenset_el0);
221		write_sysreg(test_bit, pmintenset_el1);
222		write_sysreg(test_bit, pmovsset_el0);
223
224		/* The bit will be set only if the counter is implemented */
225		pmcr_n = get_pmcr_n(read_sysreg(pmcr_el0));
226		set_expected = (pmc_idx < pmcr_n) ? true : false;
227	} else {
228		write_sysreg(test_bit, pmcntenclr_el0);
229		write_sysreg(test_bit, pmintenclr_el1);
230		write_sysreg(test_bit, pmovsclr_el0);
231	}
232	check_bitmap_pmu_regs(test_bit, set_expected);
233}
234
/*
 * Tests for reading/writing registers for the (implemented) event counter
 * specified by @pmc_idx, using each accessor pair in @acc.
 */
static void test_access_pmc_regs(struct pmc_accessor *acc, int pmc_idx)
{
	uint64_t write_data, read_data;

	/* Disable all PMCs and reset all PMCs to zero. */
	pmu_disable_reset();

	/*
	 * Tests for reading/writing {PMCNTEN,PMINTEN,PMOVS}{SET,CLR}_EL1.
	 */

	/* Make sure that the bit in those registers are set to 0 */
	test_bitmap_pmu_regs(pmc_idx, false);
	/* Test if setting the bit in those registers works */
	test_bitmap_pmu_regs(pmc_idx, true);
	/* Test if clearing the bit in those registers works */
	test_bitmap_pmu_regs(pmc_idx, false);

	/*
	 * Tests for reading/writing the event type register.
	 */

	/*
	 * Set the event type register to an arbitrary value just for testing
	 * of reading/writing the register.
	 * Arm ARM says that for the event from 0x0000 to 0x003F,
	 * the value indicated in the PMEVTYPER<n>_EL0.evtCount field is
	 * the value written to the field even when the specified event
	 * is not supported.
	 */
	write_data = (ARMV8_PMU_EXCLUDE_EL1 | ARMV8_PMUV3_PERFCTR_INST_RETIRED);
	acc->write_typer(pmc_idx, write_data);
	read_data = acc->read_typer(pmc_idx);
	/* The typer read must return exactly what was written */
	__GUEST_ASSERT(read_data == write_data,
		       "pmc_idx: 0x%x; acc_idx: 0x%lx; read_data: 0x%lx; write_data: 0x%lx",
		       pmc_idx, PMC_ACC_TO_IDX(acc), read_data, write_data);

	/*
	 * Tests for reading/writing the event count register.
	 */

	read_data = acc->read_cntr(pmc_idx);

	/* The count value must be 0, as it is disabled and reset */
	__GUEST_ASSERT(read_data == 0,
		       "pmc_idx: 0x%x; acc_idx: 0x%lx; read_data: 0x%lx",
		       pmc_idx, PMC_ACC_TO_IDX(acc), read_data);

	/* Write a distinctive, per-counter value and read it back */
	write_data = read_data + pmc_idx + 0x12345;
	acc->write_cntr(pmc_idx, write_data);
	read_data = acc->read_cntr(pmc_idx);
	__GUEST_ASSERT(read_data == write_data,
		       "pmc_idx: 0x%x; acc_idx: 0x%lx; read_data: 0x%lx; write_data: 0x%lx",
		       pmc_idx, PMC_ACC_TO_IDX(acc), read_data, write_data);
}
294
#define INVALID_EC	(-1ul)
/* EC the next trap is expected to have; INVALID_EC means "no trap expected" */
uint64_t expected_ec = INVALID_EC;

/* Synchronous exception handler shared by all EC values (see create_vpmu_vm) */
static void guest_sync_handler(struct ex_regs *regs)
{
	uint64_t esr, ec;

	esr = read_sysreg(esr_el1);
	ec = (esr >> ESR_EC_SHIFT) & ESR_EC_MASK;

	/* A trap with any other exception class is a test failure */
	__GUEST_ASSERT(expected_ec == ec,
			"PC: 0x%lx; ESR: 0x%lx; EC: 0x%lx; EC expected: 0x%lx",
			regs->pc, esr, ec, expected_ec);

	/* skip the trapping instruction */
	regs->pc += 4;

	/* Use INVALID_EC to indicate an exception occurred */
	expected_ec = INVALID_EC;
}
315
/*
 * Run the given operation that should trigger an exception with the
 * given exception class. The exception handler (guest_sync_handler)
 * will reset expected_ec to INVALID_EC and skip the instruction that
 * trapped; the final assert therefore fails if no exception occurred.
 */
#define TEST_EXCEPTION(ec, ops)				\
({							\
	GUEST_ASSERT(ec != INVALID_EC);			\
	WRITE_ONCE(expected_ec, ec);			\
	dsb(ish);					\
	ops;						\
	GUEST_ASSERT(expected_ec == INVALID_EC);	\
})
330
331/*
332 * Tests for reading/writing registers for the unimplemented event counter
333 * specified by @pmc_idx (>= PMCR_EL0.N).
334 */
335static void test_access_invalid_pmc_regs(struct pmc_accessor *acc, int pmc_idx)
336{
337	/*
338	 * Reading/writing the event count/type registers should cause
339	 * an UNDEFINED exception.
340	 */
341	TEST_EXCEPTION(ESR_EC_UNKNOWN, acc->read_cntr(pmc_idx));
342	TEST_EXCEPTION(ESR_EC_UNKNOWN, acc->write_cntr(pmc_idx, 0));
343	TEST_EXCEPTION(ESR_EC_UNKNOWN, acc->read_typer(pmc_idx));
344	TEST_EXCEPTION(ESR_EC_UNKNOWN, acc->write_typer(pmc_idx, 0));
345	/*
346	 * The bit corresponding to the (unimplemented) counter in
347	 * {PMCNTEN,PMINTEN,PMOVS}{SET,CLR} registers should be RAZ.
348	 */
349	test_bitmap_pmu_regs(pmc_idx, 1);
350	test_bitmap_pmu_regs(pmc_idx, 0);
351}
352
353/*
354 * The guest is configured with PMUv3 with @expected_pmcr_n number of
355 * event counters.
356 * Check if @expected_pmcr_n is consistent with PMCR_EL0.N, and
357 * if reading/writing PMU registers for implemented or unimplemented
358 * counters works as expected.
359 */
360static void guest_code(uint64_t expected_pmcr_n)
361{
362	uint64_t pmcr, pmcr_n, unimp_mask;
363	int i, pmc;
364
365	__GUEST_ASSERT(expected_pmcr_n <= ARMV8_PMU_MAX_GENERAL_COUNTERS,
366			"Expected PMCR.N: 0x%lx; ARMv8 general counters: 0x%x",
367			expected_pmcr_n, ARMV8_PMU_MAX_GENERAL_COUNTERS);
368
369	pmcr = read_sysreg(pmcr_el0);
370	pmcr_n = get_pmcr_n(pmcr);
371
372	/* Make sure that PMCR_EL0.N indicates the value userspace set */
373	__GUEST_ASSERT(pmcr_n == expected_pmcr_n,
374			"Expected PMCR.N: 0x%lx, PMCR.N: 0x%lx",
375			expected_pmcr_n, pmcr_n);
376
377	/*
378	 * Make sure that (RAZ) bits corresponding to unimplemented event
379	 * counters in {PMCNTEN,PMINTEN,PMOVS}{SET,CLR} registers are reset
380	 * to zero.
381	 * (NOTE: bits for implemented event counters are reset to UNKNOWN)
382	 */
383	unimp_mask = GENMASK_ULL(ARMV8_PMU_MAX_GENERAL_COUNTERS - 1, pmcr_n);
384	check_bitmap_pmu_regs(unimp_mask, false);
385
386	/*
387	 * Tests for reading/writing PMU registers for implemented counters.
388	 * Use each combination of PMEV{CNTR,TYPER}<n>_EL0 accessor functions.
389	 */
390	for (i = 0; i < ARRAY_SIZE(pmc_accessors); i++) {
391		for (pmc = 0; pmc < pmcr_n; pmc++)
392			test_access_pmc_regs(&pmc_accessors[i], pmc);
393	}
394
395	/*
396	 * Tests for reading/writing PMU registers for unimplemented counters.
397	 * Use each combination of PMEV{CNTR,TYPER}<n>_EL0 accessor functions.
398	 */
399	for (i = 0; i < ARRAY_SIZE(pmc_accessors); i++) {
400		for (pmc = pmcr_n; pmc < ARMV8_PMU_MAX_GENERAL_COUNTERS; pmc++)
401			test_access_invalid_pmc_regs(&pmc_accessors[i], pmc);
402	}
403
404	GUEST_DONE();
405}
406
407#define GICD_BASE_GPA	0x8000000ULL
408#define GICR_BASE_GPA	0x80A0000ULL
409
/* Create a VM that has one vCPU with PMUv3 configured. */
static void create_vpmu_vm(void *guest_code)
{
	struct kvm_vcpu_init init;
	uint8_t pmuver, ec;
	uint64_t dfr0, irq = 23;	/* PPI 23 is used as the PMU overflow interrupt */
	struct kvm_device_attr irq_attr = {
		.group = KVM_ARM_VCPU_PMU_V3_CTRL,
		.attr = KVM_ARM_VCPU_PMU_V3_IRQ,
		.addr = (uint64_t)&irq,
	};
	struct kvm_device_attr init_attr = {
		.group = KVM_ARM_VCPU_PMU_V3_CTRL,
		.attr = KVM_ARM_VCPU_PMU_V3_INIT,
	};

	/* The test creates the vpmu_vm multiple times. Ensure a clean state */
	memset(&vpmu_vm, 0, sizeof(vpmu_vm));

	vpmu_vm.vm = vm_create(1);
	vm_init_descriptor_tables(vpmu_vm.vm);
	/* Route every synchronous exception class to guest_sync_handler */
	for (ec = 0; ec < ESR_EC_NUM; ec++) {
		vm_install_sync_handler(vpmu_vm.vm, VECTOR_SYNC_CURRENT, ec,
					guest_sync_handler);
	}

	/* Create vCPU with PMUv3 */
	vm_ioctl(vpmu_vm.vm, KVM_ARM_PREFERRED_TARGET, &init);
	init.features[0] |= (1 << KVM_ARM_VCPU_PMU_V3);
	vpmu_vm.vcpu = aarch64_vcpu_add(vpmu_vm.vm, 0, &init, guest_code);
	vcpu_init_descriptor_tables(vpmu_vm.vcpu);
	vpmu_vm.gic_fd = vgic_v3_setup(vpmu_vm.vm, 1, 64,
					GICD_BASE_GPA, GICR_BASE_GPA);
	/* Without a vgic-v3, the vPMU cannot be set up; skip rather than fail */
	__TEST_REQUIRE(vpmu_vm.gic_fd >= 0,
		       "Failed to create vgic-v3, skipping");

	/* Make sure that PMUv3 support is indicated in the ID register */
	vcpu_get_reg(vpmu_vm.vcpu,
		     KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1), &dfr0);
	pmuver = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), dfr0);
	TEST_ASSERT(pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF &&
		    pmuver >= ID_AA64DFR0_EL1_PMUVer_IMP,
		    "Unexpected PMUVER (0x%x) on the vCPU with PMUv3", pmuver);

	/* Initialize vPMU */
	vcpu_ioctl(vpmu_vm.vcpu, KVM_SET_DEVICE_ATTR, &irq_attr);
	vcpu_ioctl(vpmu_vm.vcpu, KVM_SET_DEVICE_ATTR, &init_attr);
}
458
/* Tear down the VM created by create_vpmu_vm() */
static void destroy_vpmu_vm(void)
{
	close(vpmu_vm.gic_fd);
	kvm_vm_free(vpmu_vm.vm);
}
464
465static void run_vcpu(struct kvm_vcpu *vcpu, uint64_t pmcr_n)
466{
467	struct ucall uc;
468
469	vcpu_args_set(vcpu, 1, pmcr_n);
470	vcpu_run(vcpu);
471	switch (get_ucall(vcpu, &uc)) {
472	case UCALL_ABORT:
473		REPORT_GUEST_ASSERT(uc);
474		break;
475	case UCALL_DONE:
476		break;
477	default:
478		TEST_FAIL("Unknown ucall %lu", uc.cmd);
479		break;
480	}
481}
482
/*
 * Create the vpmu_vm and attempt to set PMCR_EL0.N to @pmcr_n from
 * userspace. When @expect_fail is true, @pmcr_n is larger than the host
 * limit and the write must leave PMCR unchanged; otherwise PMCR_EL0.N
 * must read back as @pmcr_n. The VM is left alive for the caller.
 */
static void test_create_vpmu_vm_with_pmcr_n(uint64_t pmcr_n, bool expect_fail)
{
	struct kvm_vcpu *vcpu;
	uint64_t pmcr, pmcr_orig;

	create_vpmu_vm(guest_code);
	vcpu = vpmu_vm.vcpu;

	vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr_orig);
	pmcr = pmcr_orig;

	/*
	 * Setting a larger value of PMCR.N should not modify the field, and
	 * return a success.
	 */
	set_pmcr_n(&pmcr, pmcr_n);
	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), pmcr);
	vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr);

	if (expect_fail)
		TEST_ASSERT(pmcr_orig == pmcr,
			    "PMCR.N modified by KVM to a larger value (PMCR: 0x%lx) for pmcr_n: 0x%lx",
			    pmcr, pmcr_n);
	else
		TEST_ASSERT(pmcr_n == get_pmcr_n(pmcr),
			    "Failed to update PMCR.N to %lu (received: %lu)",
			    pmcr_n, get_pmcr_n(pmcr));
}
511
/*
 * Create a guest with one vCPU, set the PMCR_EL0.N for the vCPU to @pmcr_n,
 * and run the test. The guest is then reset, re-initialized, and run a
 * second time to verify PMCR_EL0.N survives a vCPU reset.
 */
static void run_access_test(uint64_t pmcr_n)
{
	uint64_t sp;
	struct kvm_vcpu *vcpu;
	struct kvm_vcpu_init init;

	pr_debug("Test with pmcr_n %lu\n", pmcr_n);

	test_create_vpmu_vm_with_pmcr_n(pmcr_n, false);
	vcpu = vpmu_vm.vcpu;

	/* Save the initial sp to restore them later to run the guest again */
	vcpu_get_reg(vcpu, ARM64_CORE_REG(sp_el1), &sp);

	run_vcpu(vcpu, pmcr_n);

	/*
	 * Reset and re-initialize the vCPU, and run the guest code again to
	 * check if PMCR_EL0.N is preserved.
	 */
	vm_ioctl(vpmu_vm.vm, KVM_ARM_PREFERRED_TARGET, &init);
	init.features[0] |= (1 << KVM_ARM_VCPU_PMU_V3);
	aarch64_vcpu_setup(vcpu, &init);
	vcpu_init_descriptor_tables(vcpu);
	/* Restore the stack pointer and entry point clobbered by the reset */
	vcpu_set_reg(vcpu, ARM64_CORE_REG(sp_el1), sp);
	vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code);

	run_vcpu(vcpu, pmcr_n);

	destroy_vpmu_vm();
}
547
/* SET/CLR bitmap register pairs exercised by run_pmregs_validity_test() */
static struct pmreg_sets validity_check_reg_sets[] = {
	PMREG_SET(SYS_PMCNTENSET_EL0, SYS_PMCNTENCLR_EL0),
	PMREG_SET(SYS_PMINTENSET_EL1, SYS_PMINTENCLR_EL1),
	PMREG_SET(SYS_PMOVSSET_EL0, SYS_PMOVSCLR_EL0),
};
553
/*
 * Create a VM, and check if KVM handles the userspace accesses of
 * the PMU register sets in @validity_check_reg_sets[] correctly.
 */
static void run_pmregs_validity_test(uint64_t pmcr_n)
{
	int i;
	struct kvm_vcpu *vcpu;
	uint64_t set_reg_id, clr_reg_id, reg_val;
	uint64_t valid_counters_mask, max_counters_mask;

	test_create_vpmu_vm_with_pmcr_n(pmcr_n, false);
	vcpu = vpmu_vm.vcpu;

	/* Bits for the @pmcr_n implemented counters (plus the cycle counter) */
	valid_counters_mask = get_counters_mask(pmcr_n);
	/* Bits for every architecturally possible counter */
	max_counters_mask = get_counters_mask(ARMV8_PMU_MAX_COUNTERS);

	for (i = 0; i < ARRAY_SIZE(validity_check_reg_sets); i++) {
		set_reg_id = validity_check_reg_sets[i].set_reg_id;
		clr_reg_id = validity_check_reg_sets[i].clr_reg_id;

		/*
		 * Test if the 'set' and 'clr' variants of the registers
		 * are initialized based on the number of valid counters.
		 */
		vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(set_reg_id), &reg_val);
		TEST_ASSERT((reg_val & (~valid_counters_mask)) == 0,
			    "Initial read of set_reg: 0x%llx has unimplemented counters enabled: 0x%lx",
			    KVM_ARM64_SYS_REG(set_reg_id), reg_val);

		vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(clr_reg_id), &reg_val);
		TEST_ASSERT((reg_val & (~valid_counters_mask)) == 0,
			    "Initial read of clr_reg: 0x%llx has unimplemented counters enabled: 0x%lx",
			    KVM_ARM64_SYS_REG(clr_reg_id), reg_val);

		/*
		 * Using the 'set' variant, force-set the register to the
		 * max number of possible counters and test if KVM discards
		 * the bits for unimplemented counters as it should.
		 */
		vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(set_reg_id), max_counters_mask);

		vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(set_reg_id), &reg_val);
		TEST_ASSERT((reg_val & (~valid_counters_mask)) == 0,
			    "Read of set_reg: 0x%llx has unimplemented counters enabled: 0x%lx",
			    KVM_ARM64_SYS_REG(set_reg_id), reg_val);

		vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(clr_reg_id), &reg_val);
		TEST_ASSERT((reg_val & (~valid_counters_mask)) == 0,
			    "Read of clr_reg: 0x%llx has unimplemented counters enabled: 0x%lx",
			    KVM_ARM64_SYS_REG(clr_reg_id), reg_val);
	}

	destroy_vpmu_vm();
}
609
/*
 * Create a guest with one vCPU, and attempt to set the PMCR_EL0.N for
 * the vCPU to @pmcr_n, which is larger than the host value.
 * The attempt should fail as @pmcr_n is too big to set for the vCPU.
 */
static void run_error_test(uint64_t pmcr_n)
{
	pr_debug("Error test with pmcr_n %lu (larger than the host)\n", pmcr_n);

	/* expect_fail=true: the PMCR.N write must be silently rejected */
	test_create_vpmu_vm_with_pmcr_n(pmcr_n, true);
	destroy_vpmu_vm();
}
622
/*
 * Return the default number of implemented PMU event counters excluding
 * the cycle counter (i.e. PMCR_EL0.N value) for the guest.
 * Creates and destroys a throwaway VM just to read the default PMCR.
 */
static uint64_t get_pmcr_n_limit(void)
{
	uint64_t pmcr;

	create_vpmu_vm(guest_code);
	vcpu_get_reg(vpmu_vm.vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr);
	destroy_vpmu_vm();
	return get_pmcr_n(pmcr);
}
636
637int main(void)
638{
639	uint64_t i, pmcr_n;
640
641	TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_PMU_V3));
642
643	pmcr_n = get_pmcr_n_limit();
644	for (i = 0; i <= pmcr_n; i++) {
645		run_access_test(i);
646		run_pmregs_validity_test(i);
647	}
648
649	for (i = pmcr_n + 1; i < ARMV8_PMU_MAX_COUNTERS; i++)
650		run_error_test(i);
651
652	return 0;
653}
654