1// SPDX-License-Identifier: GPL-2.0
2/*
3 * vgic_irq.c - Test userspace injection of IRQs
4 *
5 * This test validates the injection of IRQs from userspace using various
6 * methods (e.g., KVM_IRQ_LINE) and modes (e.g., EOI). The guest "asks" the
7 * host to inject a specific intid via a GUEST_SYNC call, and then checks that
8 * it received it.
9 */
10#include <asm/kvm.h>
11#include <asm/kvm_para.h>
12#include <sys/eventfd.h>
13#include <linux/sizes.h>
14
15#include "processor.h"
16#include "test_util.h"
17#include "kvm_util.h"
18#include "gic.h"
19#include "gic_v3.h"
20#include "vgic.h"
21
/* Guest physical addresses of the GIC distributor/redistributor frames. */
#define GICD_BASE_GPA		0x08000000ULL
#define GICR_BASE_GPA		0x080A0000ULL
24
25/*
26 * Stores the user specified args; it's passed to the guest and to every test
27 * function.
28 */
29struct test_args {
30	uint32_t nr_irqs; /* number of KVM supported IRQs. */
31	bool eoi_split; /* 1 is eoir+dir, 0 is eoir only */
32	bool level_sensitive; /* 1 is level, 0 is edge */
33	int kvm_max_routes; /* output of KVM_CAP_IRQ_ROUTING */
34	bool kvm_supports_irqfd; /* output of KVM_CAP_IRQFD */
35};
36
37/*
38 * KVM implements 32 priority levels:
39 * 0x00 (highest priority) - 0xF8 (lowest priority), in steps of 8
40 *
41 * Note that these macros will still be correct in the case that KVM implements
42 * more priority levels. Also note that 32 is the minimum for GICv3 and GICv2.
43 */
44#define KVM_NUM_PRIOS		32
45#define KVM_PRIO_SHIFT		3 /* steps of 8 = 1 << 3 */
46#define KVM_PRIO_STEPS		(1 << KVM_PRIO_SHIFT) /* 8 */
47#define LOWEST_PRIO		(KVM_NUM_PRIOS - 1)
48#define CPU_PRIO_MASK		(LOWEST_PRIO << KVM_PRIO_SHIFT)	/* 0xf8 */
49#define IRQ_DEFAULT_PRIO	(LOWEST_PRIO - 1)
50#define IRQ_DEFAULT_PRIO_REG	(IRQ_DEFAULT_PRIO << KVM_PRIO_SHIFT) /* 0xf0 */
51
/* Guest-side pointers to the GIC MMIO frames (GPAs used directly). */
static void *dist = (void *)GICD_BASE_GPA;
static void *redist = (void *)GICR_BASE_GPA;
54
55/*
56 * The kvm_inject_* utilities are used by the guest to ask the host to inject
57 * interrupts (e.g., using the KVM_IRQ_LINE ioctl).
58 */
59
/* Hypercall commands the guest can ask the host to perform. */
typedef enum {
	KVM_INJECT_EDGE_IRQ_LINE = 1,	/* KVM_IRQ_LINE pulse: level 1 then 0 */
	KVM_SET_IRQ_LINE,		/* KVM_IRQ_LINE to the given level */
	KVM_SET_IRQ_LINE_HIGH,		/* KVM_IRQ_LINE to level 1 */
	KVM_SET_LEVEL_INFO_HIGH,	/* set level info to 1 via the vgic device */
	KVM_INJECT_IRQFD,		/* GSI routing + irqfd write */
	KVM_WRITE_ISPENDR,		/* set pending via ISPENDR */
	KVM_WRITE_ISACTIVER,		/* set active via ISACTIVER */
} kvm_inject_cmd;

/* Arguments of a guest->host injection request (see kvm_inject_call()). */
struct kvm_inject_args {
	kvm_inject_cmd cmd;	/* what to do */
	uint32_t first_intid;	/* first intid to operate on */
	uint32_t num;		/* number of consecutive intids */
	int level;		/* line level; only used by KVM_SET_IRQ_LINE */
	bool expect_failure;	/* host should expect the injection to fail */
};
77
/* Used on the guest side to perform the hypercall. */
static void kvm_inject_call(kvm_inject_cmd cmd, uint32_t first_intid,
		uint32_t num, int level, bool expect_failure);

/* Used on the host side to decode the hypercall arguments. */
static void kvm_inject_get_call(struct kvm_vm *vm, struct ucall *uc,
		struct kvm_inject_args *args);
85
/* Inject @num consecutive IRQs starting at @intid; level is unused. */
#define _KVM_INJECT_MULTI(cmd, intid, num, expect_failure)			\
	kvm_inject_call(cmd, intid, num, -1 /* not used */, expect_failure)

#define KVM_INJECT_MULTI(cmd, intid, num)					\
	_KVM_INJECT_MULTI(cmd, intid, num, false)

#define _KVM_INJECT(cmd, intid, expect_failure)					\
	_KVM_INJECT_MULTI(cmd, intid, 1, expect_failure)

#define KVM_INJECT(cmd, intid)							\
	_KVM_INJECT_MULTI(cmd, intid, 1, false)

/*
 * Ask the host to mark @intid active (level == 1).  Note: no trailing
 * semicolon in the expansion, so the macro behaves like an ordinary
 * statement at the call site (e.g. inside an un-braced if/else).
 */
#define KVM_ACTIVATE(cmd, intid)						\
	kvm_inject_call(cmd, intid, 1, 1, false)
100
/* Describes an injection method and which kinds of IRQs it can inject. */
struct kvm_inject_desc {
	kvm_inject_cmd cmd;
	/* can inject SGIs, PPIs, and/or SPIs. */
	bool sgi, ppi, spi;
};
106
/* Injection methods applicable to edge-triggered configurations. */
static struct kvm_inject_desc inject_edge_fns[] = {
	/*                                      sgi    ppi    spi */
	{ KVM_INJECT_EDGE_IRQ_LINE,		false, false, true },
	{ KVM_INJECT_IRQFD,			false, false, true },
	{ KVM_WRITE_ISPENDR,			true,  false, true },
	{ 0, },	/* sentinel for for_each_inject_fn() */
};

/* Injection methods applicable to level-sensitive configurations. */
static struct kvm_inject_desc inject_level_fns[] = {
	/*                                      sgi    ppi    spi */
	{ KVM_SET_IRQ_LINE_HIGH,		false, true,  true },
	{ KVM_SET_LEVEL_INFO_HIGH,		false, true,  true },
	{ KVM_INJECT_IRQFD,			false, false, true },
	{ KVM_WRITE_ISPENDR,			false, true,  true },
	{ 0, },	/* sentinel for for_each_inject_fn() */
};

/* Methods to force an IRQ active (used by the restore-active tests). */
static struct kvm_inject_desc set_active_fns[] = {
	/*                                      sgi    ppi    spi */
	{ KVM_WRITE_ISACTIVER,			true,  true,  true },
	{ 0, },	/* sentinel for for_each_inject_fn() */
};
129
/* Iterate over a zero-cmd-terminated table of injection methods. */
#define for_each_inject_fn(t, f)						\
	for ((f) = (t); (f)->cmd; (f)++)

/* Same, but skip irqfd-based methods if the host lacks KVM_CAP_IRQFD. */
#define for_each_supported_inject_fn(args, t, f)				\
	for_each_inject_fn(t, f)						\
		if ((args)->kvm_supports_irqfd || (f)->cmd != KVM_INJECT_IRQFD)

#define for_each_supported_activate_fn(args, t, f)				\
	for_each_supported_inject_fn((args), (t), (f))
139
/* Shared between the guest main thread and the IRQ handlers. */
volatile uint64_t irq_handled;			/* total IRQs handled so far */
volatile uint32_t irqnr_received[MAX_SPI + 1];	/* per-intid receive count */

/* Zero the IRQ bookkeeping before each injection round. */
static void reset_stats(void)
{
	int i;

	irq_handled = 0;
	for (i = 0; i <= MAX_SPI; i++)
		irqnr_received[i] = 0;
}
152
/*
 * Read the group-1 active-priorities register (virtual view); the barrier
 * orders the read against subsequent accesses.
 */
static uint64_t gic_read_ap1r0(void)
{
	uint64_t reg = read_sysreg_s(SYS_ICV_AP1R0_EL1);

	dsb(sy);
	return reg;
}
160
/* Write the group-1 active-priorities register and synchronize. */
static void gic_write_ap1r0(uint64_t val)
{
	write_sysreg_s(val, SYS_ICV_AP1R0_EL1);
	isb();
}
166
167static void guest_set_irq_line(uint32_t intid, uint32_t level);
168
/*
 * Generic handler body shared by the four generated IRQ handlers below.
 *
 * Acks the pending interrupt, validates the expected active/pending state
 * for the configured trigger mode, updates the stats shared with the guest
 * main thread, and deactivates the interrupt (EOI only, or EOI+DIR when
 * eoi_split is set).
 */
static void guest_irq_generic_handler(bool eoi_split, bool level_sensitive)
{
	uint32_t intid = gic_get_and_ack_irq();

	/* Spurious: nothing to handle. */
	if (intid == IAR_SPURIOUS)
		return;

	/* Acking must have made the interrupt active. */
	GUEST_ASSERT(gic_irq_get_active(intid));

	/* An edge IRQ is consumed (no longer pending) once acked... */
	if (!level_sensitive)
		GUEST_ASSERT(!gic_irq_get_pending(intid));

	/* ...while a level IRQ stays pending until the line is lowered. */
	if (level_sensitive)
		guest_set_irq_line(intid, 0);

	/*
	 * NOTE(review): irqnr_received has MAX_SPI + 1 slots, so
	 * intid == MAX_SPI would be a valid index; confirm the strict '<'
	 * is intended.
	 */
	GUEST_ASSERT(intid < MAX_SPI);
	irqnr_received[intid] += 1;
	irq_handled += 1;

	gic_set_eoi(intid);
	/* The priority drop must leave no active priorities behind. */
	GUEST_ASSERT_EQ(gic_read_ap1r0(), 0);
	if (eoi_split)
		gic_set_dir(intid);

	/* Fully deactivated: neither active nor pending anymore. */
	GUEST_ASSERT(!gic_irq_get_active(intid));
	GUEST_ASSERT(!gic_irq_get_pending(intid));
}
196
197static void kvm_inject_call(kvm_inject_cmd cmd, uint32_t first_intid,
198		uint32_t num, int level, bool expect_failure)
199{
200	struct kvm_inject_args args = {
201		.cmd = cmd,
202		.first_intid = first_intid,
203		.num = num,
204		.level = level,
205		.expect_failure = expect_failure,
206	};
207	GUEST_SYNC(&args);
208}
209
/*
 * Assert that the CPU interface has nothing to deliver: acking must return
 * either 0 or the spurious ID.
 */
#define GUEST_ASSERT_IAR_EMPTY()						\
do { 										\
	uint32_t _intid;							\
	_intid = gic_get_and_ack_irq();						\
	GUEST_ASSERT(_intid == 0 || _intid == IAR_SPURIOUS);			\
} while (0)
216
/*
 * The exception-handler signature (struct ex_regs *) has no room for test
 * configuration, so generate one handler per (eoi_split, level_sensitive)
 * combination and pick the right one at setup time from the
 * guest_irq_handlers table, indexed [eoi_split][level_sensitive].
 */
#define CAT_HELPER(a, b) a ## b
#define CAT(a, b) CAT_HELPER(a, b)
#define PREFIX guest_irq_handler_
#define GUEST_IRQ_HANDLER_NAME(split, lev) CAT(PREFIX, CAT(split, lev))
#define GENERATE_GUEST_IRQ_HANDLER(split, lev)					\
static void CAT(PREFIX, CAT(split, lev))(struct ex_regs *regs)			\
{										\
	guest_irq_generic_handler(split, lev);					\
}

GENERATE_GUEST_IRQ_HANDLER(0, 0);
GENERATE_GUEST_IRQ_HANDLER(0, 1);
GENERATE_GUEST_IRQ_HANDLER(1, 0);
GENERATE_GUEST_IRQ_HANDLER(1, 1);

static void (*guest_irq_handlers[2][2])(struct ex_regs *) = {
	{GUEST_IRQ_HANDLER_NAME(0, 0), GUEST_IRQ_HANDLER_NAME(0, 1),},
	{GUEST_IRQ_HANDLER_NAME(1, 0), GUEST_IRQ_HANDLER_NAME(1, 1),},
};
236
237static void reset_priorities(struct test_args *args)
238{
239	int i;
240
241	for (i = 0; i < args->nr_irqs; i++)
242		gic_set_priority(i, IRQ_DEFAULT_PRIO_REG);
243}
244
/* Ask the host to drive the line of @intid to @level (KVM_IRQ_LINE). */
static void guest_set_irq_line(uint32_t intid, uint32_t level)
{
	kvm_inject_call(KVM_SET_IRQ_LINE, intid, 1, level, false);
}
249
/*
 * Ask the host to inject @intid expecting the injection to fail, then check
 * that no interrupt actually arrived.  @args is currently unused but kept
 * for signature symmetry with the other test helpers.
 */
static void test_inject_fail(struct test_args *args,
		uint32_t intid, kvm_inject_cmd cmd)
{
	reset_stats();

	_KVM_INJECT(cmd, intid, true);
	/* no IRQ to handle on entry */

	GUEST_ASSERT_EQ(irq_handled, 0);
	GUEST_ASSERT_IAR_EMPTY();
}
261
/*
 * Ask the host to inject @num IRQs starting at @first_intid using @cmd,
 * handle them one at a time, and verify each was received exactly once.
 */
static void guest_inject(struct test_args *args,
		uint32_t first_intid, uint32_t num,
		kvm_inject_cmd cmd)
{
	uint32_t i;

	reset_stats();

	/* Cycle over all priorities to make things more interesting. */
	for (i = first_intid; i < num + first_intid; i++)
		gic_set_priority(i, (i % (KVM_NUM_PRIOS - 1)) << 3);

	/* Mask IRQs (PSTATE.I) so injection can't preempt the setup. */
	asm volatile("msr daifset, #2" : : : "memory");
	KVM_INJECT_MULTI(cmd, first_intid, num);

	/*
	 * Sleep in WFI, then briefly unmask so the pending IRQ can be taken,
	 * and mask again before re-checking the count.
	 */
	while (irq_handled < num) {
		asm volatile("wfi\n"
			     "msr daifclr, #2\n"
			     /* handle IRQ */
			     "msr daifset, #2\n"
			     : : : "memory");
	}
	asm volatile("msr daifclr, #2" : : : "memory");

	/* Every intid must have been received exactly once. */
	GUEST_ASSERT_EQ(irq_handled, num);
	for (i = first_intid; i < num + first_intid; i++)
		GUEST_ASSERT_EQ(irqnr_received[i], 1);
	GUEST_ASSERT_IAR_EMPTY();

	reset_priorities(args);
}
293
294/*
295 * Restore the active state of multiple concurrent IRQs (given by
296 * concurrent_irqs).  This does what a live-migration would do on the
297 * destination side assuming there are some active IRQs that were not
298 * deactivated yet.
299 */
300static void guest_restore_active(struct test_args *args,
301		uint32_t first_intid, uint32_t num,
302		kvm_inject_cmd cmd)
303{
304	uint32_t prio, intid, ap1r;
305	int i;
306
307	/*
308	 * Set the priorities of the first (KVM_NUM_PRIOS - 1) IRQs
309	 * in descending order, so intid+1 can preempt intid.
310	 */
311	for (i = 0, prio = (num - 1) * 8; i < num; i++, prio -= 8) {
312		GUEST_ASSERT(prio >= 0);
313		intid = i + first_intid;
314		gic_set_priority(intid, prio);
315	}
316
317	/*
318	 * In a real migration, KVM would restore all GIC state before running
319	 * guest code.
320	 */
321	for (i = 0; i < num; i++) {
322		intid = i + first_intid;
323		KVM_ACTIVATE(cmd, intid);
324		ap1r = gic_read_ap1r0();
325		ap1r |= 1U << i;
326		gic_write_ap1r0(ap1r);
327	}
328
329	/* This is where the "migration" would occur. */
330
331	/* finish handling the IRQs starting with the highest priority one. */
332	for (i = 0; i < num; i++) {
333		intid = num - i - 1 + first_intid;
334		gic_set_eoi(intid);
335		if (args->eoi_split)
336			gic_set_dir(intid);
337	}
338
339	for (i = 0; i < num; i++)
340		GUEST_ASSERT(!gic_irq_get_active(i + first_intid));
341	GUEST_ASSERT_EQ(gic_read_ap1r0(), 0);
342	GUEST_ASSERT_IAR_EMPTY();
343}
344
345/*
346 * Polls the IAR until it's not a spurious interrupt.
347 *
348 * This function should only be used in test_inject_preemption (with IRQs
349 * masked).
350 */
351static uint32_t wait_for_and_activate_irq(void)
352{
353	uint32_t intid;
354
355	do {
356		asm volatile("wfi" : : : "memory");
357		intid = gic_get_and_ack_irq();
358	} while (intid == IAR_SPURIOUS);
359
360	return intid;
361}
362
363/*
364 * Inject multiple concurrent IRQs (num IRQs starting at first_intid) and
365 * handle them without handling the actual exceptions.  This is done by masking
366 * interrupts for the whole test.
367 */
368static void test_inject_preemption(struct test_args *args,
369		uint32_t first_intid, int num,
370		kvm_inject_cmd cmd)
371{
372	uint32_t intid, prio, step = KVM_PRIO_STEPS;
373	int i;
374
375	/* Set the priorities of the first (KVM_NUM_PRIOS - 1) IRQs
376	 * in descending order, so intid+1 can preempt intid.
377	 */
378	for (i = 0, prio = (num - 1) * step; i < num; i++, prio -= step) {
379		GUEST_ASSERT(prio >= 0);
380		intid = i + first_intid;
381		gic_set_priority(intid, prio);
382	}
383
384	local_irq_disable();
385
386	for (i = 0; i < num; i++) {
387		uint32_t tmp;
388		intid = i + first_intid;
389		KVM_INJECT(cmd, intid);
390		/* Each successive IRQ will preempt the previous one. */
391		tmp = wait_for_and_activate_irq();
392		GUEST_ASSERT_EQ(tmp, intid);
393		if (args->level_sensitive)
394			guest_set_irq_line(intid, 0);
395	}
396
397	/* finish handling the IRQs starting with the highest priority one. */
398	for (i = 0; i < num; i++) {
399		intid = num - i - 1 + first_intid;
400		gic_set_eoi(intid);
401		if (args->eoi_split)
402			gic_set_dir(intid);
403	}
404
405	local_irq_enable();
406
407	for (i = 0; i < num; i++)
408		GUEST_ASSERT(!gic_irq_get_active(i + first_intid));
409	GUEST_ASSERT_EQ(gic_read_ap1r0(), 0);
410	GUEST_ASSERT_IAR_EMPTY();
411
412	reset_priorities(args);
413}
414
415static void test_injection(struct test_args *args, struct kvm_inject_desc *f)
416{
417	uint32_t nr_irqs = args->nr_irqs;
418
419	if (f->sgi) {
420		guest_inject(args, MIN_SGI, 1, f->cmd);
421		guest_inject(args, 0, 16, f->cmd);
422	}
423
424	if (f->ppi)
425		guest_inject(args, MIN_PPI, 1, f->cmd);
426
427	if (f->spi) {
428		guest_inject(args, MIN_SPI, 1, f->cmd);
429		guest_inject(args, nr_irqs - 1, 1, f->cmd);
430		guest_inject(args, MIN_SPI, nr_irqs - MIN_SPI, f->cmd);
431	}
432}
433
434static void test_injection_failure(struct test_args *args,
435		struct kvm_inject_desc *f)
436{
437	uint32_t bad_intid[] = { args->nr_irqs, 1020, 1024, 1120, 5120, ~0U, };
438	int i;
439
440	for (i = 0; i < ARRAY_SIZE(bad_intid); i++)
441		test_inject_fail(args, bad_intid[i], f->cmd);
442}
443
444static void test_preemption(struct test_args *args, struct kvm_inject_desc *f)
445{
446	/*
447	 * Test up to 4 levels of preemption. The reason is that KVM doesn't
448	 * currently implement the ability to have more than the number-of-LRs
449	 * number of concurrently active IRQs. The number of LRs implemented is
450	 * IMPLEMENTATION DEFINED, however, it seems that most implement 4.
451	 */
452	if (f->sgi)
453		test_inject_preemption(args, MIN_SGI, 4, f->cmd);
454
455	if (f->ppi)
456		test_inject_preemption(args, MIN_PPI, 4, f->cmd);
457
458	if (f->spi)
459		test_inject_preemption(args, MIN_SPI, 4, f->cmd);
460}
461
462static void test_restore_active(struct test_args *args, struct kvm_inject_desc *f)
463{
464	/* Test up to 4 active IRQs. Same reason as in test_preemption. */
465	if (f->sgi)
466		guest_restore_active(args, MIN_SGI, 4, f->cmd);
467
468	if (f->ppi)
469		guest_restore_active(args, MIN_PPI, 4, f->cmd);
470
471	if (f->spi)
472		guest_restore_active(args, MIN_SPI, 4, f->cmd);
473}
474
/* Guest entry point: initialize the GIC, then run every supported test. */
static void guest_code(struct test_args *args)
{
	uint32_t i, nr_irqs = args->nr_irqs;
	bool level_sensitive = args->level_sensitive;
	struct kvm_inject_desc *f, *inject_fns;

	gic_init(GIC_V3, 1, dist, redist);

	/* Enable every IRQ the vgic was created with. */
	for (i = 0; i < nr_irqs; i++)
		gic_irq_enable(i);

	/* Configure SPI trigger mode (arg is presumably "is edge" — see gic_irq_set_config()). */
	for (i = MIN_SPI; i < nr_irqs; i++)
		gic_irq_set_config(i, !level_sensitive);

	gic_set_eoi_split(args->eoi_split);

	reset_priorities(args);
	/* Allow every priority used by the tests (all are < CPU_PRIO_MASK). */
	gic_set_priority_mask(CPU_PRIO_MASK);

	/* Pick the injection methods that match the trigger mode. */
	inject_fns  = level_sensitive ? inject_level_fns
				      : inject_edge_fns;

	local_irq_enable();

	/* Start the tests. */
	for_each_supported_inject_fn(args, inject_fns, f) {
		test_injection(args, f);
		test_preemption(args, f);
		test_injection_failure(args, f);
	}

	/*
	 * Restore the active state of IRQs. This would happen when live
	 * migrating IRQs in the middle of being handled.
	 */
	for_each_supported_activate_fn(args, set_active_fns, f)
		test_restore_active(args, f);

	GUEST_DONE();
}
515
516static void kvm_irq_line_check(struct kvm_vm *vm, uint32_t intid, int level,
517			struct test_args *test_args, bool expect_failure)
518{
519	int ret;
520
521	if (!expect_failure) {
522		kvm_arm_irq_line(vm, intid, level);
523	} else {
524		/* The interface doesn't allow larger intid's. */
525		if (intid > KVM_ARM_IRQ_NUM_MASK)
526			return;
527
528		ret = _kvm_arm_irq_line(vm, intid, level);
529		TEST_ASSERT(ret != 0 && errno == EINVAL,
530				"Bad intid %i did not cause KVM_IRQ_LINE "
531				"error: rc: %i errno: %i", intid, ret, errno);
532	}
533}
534
/*
 * Set @intid's level info via the vgic device attribute, checking the
 * expected success/failure behavior.
 *
 * NOTE(review): not static, unlike the sibling helpers in this file —
 * confirm whether that is intentional.
 */
void kvm_irq_set_level_info_check(int gic_fd, uint32_t intid, int level,
			bool expect_failure)
{
	if (!expect_failure) {
		kvm_irq_set_level_info(gic_fd, intid, level);
	} else {
		int ret = _kvm_irq_set_level_info(gic_fd, intid, level);
		/*
		 * The kernel silently fails for invalid SPIs and SGIs (which
		 * are not level-sensitive). It only checks for intid to not
		 * spill over 1U << 10 (the max reserved SPI). Also, callers
		 * are supposed to mask the intid with 0x3ff (1023).
		 */
		if (intid > VGIC_MAX_RESERVED)
			TEST_ASSERT(ret != 0 && errno == EINVAL,
				"Bad intid %i did not cause VGIC_GRP_LEVEL_INFO "
				"error: rc: %i errno: %i", intid, ret, errno);
		else
			TEST_ASSERT(!ret, "KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO "
				"for intid %i failed, rc: %i errno: %i",
				intid, ret, errno);
	}
}
558
/*
 * Build a GSI routing table mapping GSIs [intid - MIN_SPI, ...) 1:1 to
 * irqchip pins and write it, checking the expected success/failure.
 *
 * NOTE(review): routing is presumably freed by
 * kvm_gsi_routing_write()/_kvm_gsi_routing_write() — confirm, otherwise
 * this leaks the allocation.
 */
static void kvm_set_gsi_routing_irqchip_check(struct kvm_vm *vm,
		uint32_t intid, uint32_t num, uint32_t kvm_max_routes,
		bool expect_failure)
{
	struct kvm_irq_routing *routing;
	int ret;
	uint64_t i;

	assert(num <= kvm_max_routes && kvm_max_routes <= KVM_MAX_IRQ_ROUTES);

	routing = kvm_gsi_routing_create();
	for (i = intid; i < (uint64_t)intid + num; i++)
		kvm_gsi_routing_irqchip_add(routing, i - MIN_SPI, i - MIN_SPI);

	if (!expect_failure) {
		kvm_gsi_routing_write(vm, routing);
	} else {
		ret = _kvm_gsi_routing_write(vm, routing);
		/* The kernel only checks e->irqchip.pin >= KVM_IRQCHIP_NUM_PINS */
		if (((uint64_t)intid + num - 1 - MIN_SPI) >= KVM_IRQCHIP_NUM_PINS)
			TEST_ASSERT(ret != 0 && errno == EINVAL,
				"Bad intid %u did not cause KVM_SET_GSI_ROUTING "
				"error: rc: %i errno: %i", intid, ret, errno);
		else
			TEST_ASSERT(ret == 0, "KVM_SET_GSI_ROUTING "
				"for intid %i failed, rc: %i errno: %i",
				intid, ret, errno);
	}
}
588
/* Set @intid pending through GICD_ISPENDR, unless a failure is expected. */
static void kvm_irq_write_ispendr_check(int gic_fd, uint32_t intid,
					struct kvm_vcpu *vcpu,
					bool expect_failure)
{
	/*
	 * Ignore this when expecting failure as invalid intids will lead to
	 * either trying to inject SGIs when we configured the test to be
	 * level_sensitive (or the reverse), or inject large intids which
	 * will lead to writing above the ISPENDR register space (and we
	 * don't want to do that either).
	 */
	if (expect_failure)
		return;

	kvm_irq_write_ispendr(gic_fd, intid, vcpu);
}
603
/*
 * Route GSIs for [intid, intid + num), bind one eventfd per GSI, and fire
 * each eventfd once to inject the interrupts through irqfd.
 */
static void kvm_routing_and_irqfd_check(struct kvm_vm *vm,
		uint32_t intid, uint32_t num, uint32_t kvm_max_routes,
		bool expect_failure)
{
	int fd[MAX_SPI];
	uint64_t val;
	int ret, f;
	uint64_t i;

	/*
	 * There is no way to try injecting an SGI or PPI as the interface
	 * starts counting from the first SPI (above the private ones), so just
	 * exit.
	 */
	if (INTID_IS_SGI(intid) || INTID_IS_PPI(intid))
		return;

	kvm_set_gsi_routing_irqchip_check(vm, intid, num,
			kvm_max_routes, expect_failure);

	/*
	 * If expect_failure, just inject anyway. These will silently fail.
	 * And in any case, the guest will check that no actual interrupt was
	 * injected for those cases.
	 */

	/* One eventfd per intid. */
	for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) {
		fd[f] = eventfd(0, 0);
		TEST_ASSERT(fd[f] != -1, __KVM_SYSCALL_ERROR("eventfd()", fd[f]));
	}

	/* Bind each eventfd to its GSI (GSI 0 corresponds to MIN_SPI). */
	for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) {
		struct kvm_irqfd irqfd = {
			.fd  = fd[f],
			.gsi = i - MIN_SPI,
		};
		assert(i <= (uint64_t)UINT_MAX);
		vm_ioctl(vm, KVM_IRQFD, &irqfd);
	}

	/* Trigger the interrupts by signaling the eventfds. */
	for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) {
		val = 1;
		ret = write(fd[f], &val, sizeof(uint64_t));
		TEST_ASSERT(ret == sizeof(uint64_t),
			    __KVM_SYSCALL_ERROR("write()", ret));
	}

	for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++)
		close(fd[f]);
}
654
/*
 * Iterate i over [first, first + num).  tmp is a uint64_t cursor so the
 * valid wrap-around case intid=0xffffffff num=1 terminates correctly.
 */
#define for_each_intid(first, num, tmp, i)					\
	for ((tmp) = (i) = (first);						\
		(tmp) < (uint64_t)(first) + (uint64_t)(num);			\
		(tmp)++, (i)++)
660
/* Host side: decode a guest injection request and carry it out. */
static void run_guest_cmd(struct kvm_vcpu *vcpu, int gic_fd,
			  struct kvm_inject_args *inject_args,
			  struct test_args *test_args)
{
	kvm_inject_cmd cmd = inject_args->cmd;
	uint32_t intid = inject_args->first_intid;
	uint32_t num = inject_args->num;
	int level = inject_args->level;
	bool expect_failure = inject_args->expect_failure;
	struct kvm_vm *vm = vcpu->vm;
	uint64_t tmp;
	uint32_t i;

	/* handles the valid case: intid=0xffffffff num=1 */
	assert(intid < UINT_MAX - num || num == 1);

	switch (cmd) {
	case KVM_INJECT_EDGE_IRQ_LINE:
		/* Pulse each line: raise it, then lower it. */
		for_each_intid(intid, num, tmp, i)
			kvm_irq_line_check(vm, i, 1, test_args,
					expect_failure);
		for_each_intid(intid, num, tmp, i)
			kvm_irq_line_check(vm, i, 0, test_args,
					expect_failure);
		break;
	case KVM_SET_IRQ_LINE:
		for_each_intid(intid, num, tmp, i)
			kvm_irq_line_check(vm, i, level, test_args,
					expect_failure);
		break;
	case KVM_SET_IRQ_LINE_HIGH:
		for_each_intid(intid, num, tmp, i)
			kvm_irq_line_check(vm, i, 1, test_args,
					expect_failure);
		break;
	case KVM_SET_LEVEL_INFO_HIGH:
		for_each_intid(intid, num, tmp, i)
			kvm_irq_set_level_info_check(gic_fd, i, 1,
					expect_failure);
		break;
	case KVM_INJECT_IRQFD:
		kvm_routing_and_irqfd_check(vm, intid, num,
					test_args->kvm_max_routes,
					expect_failure);
		break;
	case KVM_WRITE_ISPENDR:
		for (i = intid; i < intid + num; i++)
			kvm_irq_write_ispendr_check(gic_fd, i, vcpu,
						    expect_failure);
		break;
	case KVM_WRITE_ISACTIVER:
		for (i = intid; i < intid + num; i++)
			kvm_irq_write_isactiver(gic_fd, i, vcpu);
		break;
	default:
		break;
	}
}
719
720static void kvm_inject_get_call(struct kvm_vm *vm, struct ucall *uc,
721		struct kvm_inject_args *args)
722{
723	struct kvm_inject_args *kvm_args_hva;
724	vm_vaddr_t kvm_args_gva;
725
726	kvm_args_gva = uc->args[1];
727	kvm_args_hva = (struct kvm_inject_args *)addr_gva2hva(vm, kvm_args_gva);
728	memcpy(args, kvm_args_hva, sizeof(struct kvm_inject_args));
729}
730
731static void print_args(struct test_args *args)
732{
733	printf("nr-irqs=%d level-sensitive=%d eoi-split=%d\n",
734			args->nr_irqs, args->level_sensitive,
735			args->eoi_split);
736}
737
/*
 * Create a VM with one vCPU and a vgic-v3 configured with @nr_irqs, run
 * guest_code() to completion, and service its injection hypercalls.
 */
static void test_vgic(uint32_t nr_irqs, bool level_sensitive, bool eoi_split)
{
	struct ucall uc;
	int gic_fd;
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	struct kvm_inject_args inject_args;
	vm_vaddr_t args_gva;

	struct test_args args = {
		.nr_irqs = nr_irqs,
		.level_sensitive = level_sensitive,
		.eoi_split = eoi_split,
		.kvm_max_routes = kvm_check_cap(KVM_CAP_IRQ_ROUTING),
		.kvm_supports_irqfd = kvm_check_cap(KVM_CAP_IRQFD),
	};

	print_args(&args);

	vm = vm_create_with_one_vcpu(&vcpu, guest_code);

	vm_init_descriptor_tables(vm);
	vcpu_init_descriptor_tables(vcpu);

	/* Setup the guest args page (so it gets the args). */
	args_gva = vm_vaddr_alloc_page(vm);
	memcpy(addr_gva2hva(vm, args_gva), &args, sizeof(args));
	vcpu_args_set(vcpu, 1, args_gva);

	gic_fd = vgic_v3_setup(vm, 1, nr_irqs,
			GICD_BASE_GPA, GICR_BASE_GPA);
	__TEST_REQUIRE(gic_fd >= 0, "Failed to create vgic-v3, skipping");

	/* Pick the pre-generated handler matching this configuration. */
	vm_install_exception_handler(vm, VECTOR_IRQ_CURRENT,
		guest_irq_handlers[args.eoi_split][args.level_sensitive]);

	/* Run the guest, servicing injection hypercalls until GUEST_DONE. */
	while (1) {
		vcpu_run(vcpu);

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_SYNC:
			kvm_inject_get_call(vm, &uc, &inject_args);
			run_guest_cmd(vcpu, gic_fd, &inject_args, &args);
			break;
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
			break;
		case UCALL_DONE:
			goto done;
		default:
			TEST_FAIL("Unknown ucall %lu", uc.cmd);
		}
	}

done:
	close(gic_fd);
	kvm_vm_free(vm);
}
796
/* Print usage information and exit with a failure status. */
static void help(const char *name)
{
	printf("\n"
	       "usage: %s [-n num_irqs] [-e eoi_split] [-l level_sensitive]\n"
	       " -n: specify number of IRQs to setup the vgic with. "
	       "It has to be a multiple of 32 and between 64 and 1024.\n"
	       " -e: if 1 then EOI is split into a write to DIR on top "
	       "of writing EOI.\n"
	       " -l: specify whether the IRQs are level-sensitive (1) or not (0)."
	       "\n", name);
	exit(1);
}
810
int main(int argc, char **argv)
{
	uint32_t nr_irqs = 64;		/* default; must be 64..1024, mult of 32 */
	bool default_args = true;	/* run all mode combinations? */
	bool level_sensitive = false;
	int opt;
	bool eoi_split = false;

	while ((opt = getopt(argc, argv, "hn:e:l:")) != -1) {
		switch (opt) {
		case 'n':
			nr_irqs = atoi_non_negative("Number of IRQs", optarg);
			if (nr_irqs > 1024 || nr_irqs % 32)
				help(argv[0]);
			break;
		case 'e':
			eoi_split = (bool)atoi_paranoid(optarg);
			default_args = false;
			break;
		case 'l':
			level_sensitive = (bool)atoi_paranoid(optarg);
			default_args = false;
			break;
		case 'h':
		default:
			help(argv[0]);
			break;
		}
	}

	/*
	 * If the user just specified nr_irqs and/or gic_version, then run all
	 * combinations.
	 */
	if (default_args) {
		test_vgic(nr_irqs, false /* level */, false /* eoi_split */);
		test_vgic(nr_irqs, false /* level */, true /* eoi_split */);
		test_vgic(nr_irqs, true /* level */, false /* eoi_split */);
		test_vgic(nr_irqs, true /* level */, true /* eoi_split */);
	} else {
		test_vgic(nr_irqs, level_sensitive, eoi_split);
	}

	return 0;
}
856