// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Test for s390x KVM_S390_MEM_OP
 *
 * Copyright (C) 2019, Red Hat, Inc.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <pthread.h>

#include <linux/bits.h>

#include "test_util.h"
#include "kvm_util.h"
#include "kselftest.h"

enum mop_target {
	LOGICAL,
	SIDA,
	ABSOLUTE,
	INVALID,
};

enum mop_access_mode {
	READ,
	WRITE,
	CMPXCHG,
};

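/*
 * Describes one memop invocation. Fields whose names start with an underscore
 * only record whether the corresponding optional argument was supplied via the
 * MEMOP() macro below.
 */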
struct mop_desc {
	uintptr_t gaddr;
	uintptr_t gaddr_v;
	uint64_t set_flags;
	unsigned int f_check : 1;
	unsigned int f_inject : 1;
	unsigned int f_key : 1;
	unsigned int _gaddr_v : 1;
	unsigned int _set_flags : 1;
	unsigned int _sida_offset : 1;
	unsigned int _ar : 1;
	uint32_t size;
	enum mop_target target;
	enum mop_access_mode mode;
	void *buf;
	uint32_t sida_offset;
	void *old;
	uint8_t old_value[16];
	bool *cmpxchg_success;
	uint8_t ar;
	uint8_t key;
};

const uint8_t NO_KEY = 0xff;

static struct kvm_s390_mem_op ksmo_from_desc(struct mop_desc *desc)
{
	struct kvm_s390_mem_op ksmo = {
		.gaddr = (uintptr_t)desc->gaddr,
		.size = desc->size,
		.buf = ((uintptr_t)desc->buf),
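		/* Deliberately non-zero, presumably so the kernel's handling of reserved bytes gets exercised. */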
		.reserved = "ignored_ignored_ignored_ignored"
	};

	switch (desc->target) {
	case LOGICAL:
		if (desc->mode == READ)
			ksmo.op = KVM_S390_MEMOP_LOGICAL_READ;
		if (desc->mode == WRITE)
			ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE;
		break;
	case SIDA:
		if (desc->mode == READ)
			ksmo.op = KVM_S390_MEMOP_SIDA_READ;
		if (desc->mode == WRITE)
			ksmo.op = KVM_S390_MEMOP_SIDA_WRITE;
		break;
	case ABSOLUTE:
		if (desc->mode == READ)
			ksmo.op = KVM_S390_MEMOP_ABSOLUTE_READ;
		if (desc->mode == WRITE)
			ksmo.op = KVM_S390_MEMOP_ABSOLUTE_WRITE;
		if (desc->mode == CMPXCHG) {
			ksmo.op = KVM_S390_MEMOP_ABSOLUTE_CMPXCHG;
			ksmo.old_addr = (uint64_t)desc->old;
			memcpy(desc->old_value, desc->old, desc->size);
		}
		break;
	case INVALID:
		ksmo.op = -1;
	}
	if (desc->f_check)
		ksmo.flags |= KVM_S390_MEMOP_F_CHECK_ONLY;
	if (desc->f_inject)
		ksmo.flags |= KVM_S390_MEMOP_F_INJECT_EXCEPTION;
	if (desc->_set_flags)
		ksmo.flags = desc->set_flags;
	if (desc->f_key && desc->key != NO_KEY) {
		ksmo.flags |= KVM_S390_MEMOP_F_SKEY_PROTECTION;
		ksmo.key = desc->key;
	}
	if (desc->_ar)
		ksmo.ar = desc->ar;
	else
		ksmo.ar = 0;
	if (desc->_sida_offset)
		ksmo.sida_offset = desc->sida_offset;

	return ksmo;
}

struct test_info {
	struct kvm_vm *vm;
	struct kvm_vcpu *vcpu;
};

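/* Flip to true to dump every memop before it is issued; handy when debugging failures. */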
#define PRINT_MEMOP false
static void print_memop(struct kvm_vcpu *vcpu, const struct kvm_s390_mem_op *ksmo)
{
	if (!PRINT_MEMOP)
		return;

	if (!vcpu)
		printf("vm memop(");
	else
		printf("vcpu memop(");
	switch (ksmo->op) {
	case KVM_S390_MEMOP_LOGICAL_READ:
		printf("LOGICAL, READ, ");
		break;
	case KVM_S390_MEMOP_LOGICAL_WRITE:
		printf("LOGICAL, WRITE, ");
		break;
	case KVM_S390_MEMOP_SIDA_READ:
		printf("SIDA, READ, ");
		break;
	case KVM_S390_MEMOP_SIDA_WRITE:
		printf("SIDA, WRITE, ");
		break;
	case KVM_S390_MEMOP_ABSOLUTE_READ:
		printf("ABSOLUTE, READ, ");
		break;
	case KVM_S390_MEMOP_ABSOLUTE_WRITE:
		printf("ABSOLUTE, WRITE, ");
		break;
	case KVM_S390_MEMOP_ABSOLUTE_CMPXCHG:
		printf("ABSOLUTE, CMPXCHG, ");
		break;
	}
	printf("gaddr=%llu, size=%u, buf=%llu, ar=%u, key=%u, old_addr=%llx",
	       ksmo->gaddr, ksmo->size, ksmo->buf, ksmo->ar, ksmo->key,
	       ksmo->old_addr);
	if (ksmo->flags & KVM_S390_MEMOP_F_CHECK_ONLY)
		printf(", CHECK_ONLY");
	if (ksmo->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION)
		printf(", INJECT_EXCEPTION");
	if (ksmo->flags & KVM_S390_MEMOP_F_SKEY_PROTECTION)
		printf(", SKEY_PROTECTION");
	puts(")");
}

static int err_memop_ioctl(struct test_info info, struct kvm_s390_mem_op *ksmo,
			   struct mop_desc *desc)
{
	struct kvm_vcpu *vcpu = info.vcpu;

	if (!vcpu)
		return __vm_ioctl(info.vm, KVM_S390_MEM_OP, ksmo);
	else
		return __vcpu_ioctl(vcpu, KVM_S390_MEM_OP, ksmo);
}

static void memop_ioctl(struct test_info info, struct kvm_s390_mem_op *ksmo,
			struct mop_desc *desc)
{
	int r;

	r = err_memop_ioctl(info, ksmo, desc);
	if (ksmo->op == KVM_S390_MEMOP_ABSOLUTE_CMPXCHG) {
		if (desc->cmpxchg_success) {
			int diff = memcmp(desc->old_value, desc->old, desc->size);
			*desc->cmpxchg_success = !diff;
		}
	}
	TEST_ASSERT(!r, __KVM_IOCTL_ERROR("KVM_S390_MEM_OP", r));
}

#define MEMOP(err, info_p, mop_target_p, access_mode_p, buf_p, size_p, ...)	\
({										\
	struct test_info __info = (info_p);					\
	struct mop_desc __desc = {						\
		.target = (mop_target_p),					\
		.mode = (access_mode_p),					\
		.buf = (buf_p),							\
		.size = (size_p),						\
		__VA_ARGS__							\
	};									\
	struct kvm_s390_mem_op __ksmo;						\
										\
	if (__desc._gaddr_v) {							\
		if (__desc.target == ABSOLUTE)					\
			__desc.gaddr = addr_gva2gpa(__info.vm, __desc.gaddr_v);	\
		else								\
			__desc.gaddr = __desc.gaddr_v;				\
	}									\
	__ksmo = ksmo_from_desc(&__desc);					\
	print_memop(__info.vcpu, &__ksmo);					\
	err##memop_ioctl(__info, &__ksmo, &__desc);				\
})

#define MOP(...) MEMOP(, __VA_ARGS__)
#define ERR_MOP(...) MEMOP(err_, __VA_ARGS__)

#define GADDR(a) .gaddr = ((uintptr_t)a)
#define GADDR_V(v) ._gaddr_v = 1, .gaddr_v = ((uintptr_t)v)
#define CHECK_ONLY .f_check = 1
#define SET_FLAGS(f) ._set_flags = 1, .set_flags = (f)
#define SIDA_OFFSET(o) ._sida_offset = 1, .sida_offset = (o)
#define AR(a) ._ar = 1, .ar = (a)
#define KEY(a) .f_key = 1, .key = (a)
#define INJECT .f_inject = 1
#define CMPXCHG_OLD(o) .old = (o)
#define CMPXCHG_SUCCESS(s) .cmpxchg_success = (s)

#define CHECK_N_DO(f, ...) ({ f(__VA_ARGS__, CHECK_ONLY); f(__VA_ARGS__); })
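/*
 * The macros above combine as, for example:
 *   CHECK_N_DO(MOP, t.vcpu, LOGICAL, WRITE, mem1, size, GADDR_V(mem1), KEY(key));
 * which issues the write once as a CHECK_ONLY dry run and once for real.
 */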

#define PAGE_SHIFT 12
#define PAGE_SIZE (1ULL << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE - 1))
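/* Control-register bits are numbered MSB first, hence the (63 - n) shifts below. */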
#define CR0_FETCH_PROTECTION_OVERRIDE	(1UL << (63 - 38))
#define CR0_STORAGE_PROTECTION_OVERRIDE	(1UL << (63 - 39))

static uint8_t __aligned(PAGE_SIZE) mem1[65536];
static uint8_t __aligned(PAGE_SIZE) mem2[65536];

struct test_default {
	struct kvm_vm *kvm_vm;
	struct test_info vm;
	struct test_info vcpu;
	struct kvm_run *run;
	int size;
};

static struct test_default test_default_init(void *guest_code)
{
	struct kvm_vcpu *vcpu;
	struct test_default t;

	t.size = min((size_t)kvm_check_cap(KVM_CAP_S390_MEM_OP), sizeof(mem1));
	t.kvm_vm = vm_create_with_one_vcpu(&vcpu, guest_code);
	t.vm = (struct test_info) { t.kvm_vm, NULL };
	t.vcpu = (struct test_info) { t.kvm_vm, vcpu };
	t.run = vcpu->run;
	return t;
}

enum stage {
	/* Synced state set by host, e.g. DAT */
	STAGE_INITED,
	/* Guest did nothing */
	STAGE_IDLED,
	/* Guest set storage keys (specifics up to test case) */
	STAGE_SKEYS_SET,
	/* Guest copied memory (locations up to test case) */
	STAGE_COPIED,
	/* End of guest code reached */
	STAGE_DONE,
};

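/*
 * Run the vCPU until the guest performs GUEST_SYNC(stage) and assert that the
 * reported stage matches the one the host expects.
 */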
#define HOST_SYNC(info_p, stage)					\
({									\
	struct test_info __info = (info_p);				\
	struct kvm_vcpu *__vcpu = __info.vcpu;				\
	struct ucall uc;						\
	int __stage = (stage);						\
									\
	vcpu_run(__vcpu);						\
	get_ucall(__vcpu, &uc);						\
	if (uc.cmd == UCALL_ABORT) {					\
		REPORT_GUEST_ASSERT(uc);				\
	}								\
	TEST_ASSERT_EQ(uc.cmd, UCALL_SYNC);				\
	TEST_ASSERT_EQ(uc.args[1], __stage);				\
})

static void prepare_mem12(void)
{
	int i;

	for (i = 0; i < sizeof(mem1); i++)
		mem1[i] = rand();
	memset(mem2, 0xaa, sizeof(mem2));
}

#define ASSERT_MEM_EQ(p1, p2, size) \
	TEST_ASSERT(!memcmp(p1, p2, size), "Memory contents do not match!")

static void default_write_read(struct test_info copy_cpu, struct test_info mop_cpu,
			       enum mop_target mop_target, uint32_t size, uint8_t key)
{
	prepare_mem12();
	CHECK_N_DO(MOP, mop_cpu, mop_target, WRITE, mem1, size,
		   GADDR_V(mem1), KEY(key));
	HOST_SYNC(copy_cpu, STAGE_COPIED);
	CHECK_N_DO(MOP, mop_cpu, mop_target, READ, mem2, size,
		   GADDR_V(mem2), KEY(key));
	ASSERT_MEM_EQ(mem1, mem2, size);
}

static void default_read(struct test_info copy_cpu, struct test_info mop_cpu,
			 enum mop_target mop_target, uint32_t size, uint8_t key)
{
	prepare_mem12();
	CHECK_N_DO(MOP, mop_cpu, mop_target, WRITE, mem1, size, GADDR_V(mem1));
	HOST_SYNC(copy_cpu, STAGE_COPIED);
	CHECK_N_DO(MOP, mop_cpu, mop_target, READ, mem2, size,
		   GADDR_V(mem2), KEY(key));
	ASSERT_MEM_EQ(mem1, mem2, size);
}

static void default_cmpxchg(struct test_default *test, uint8_t key)
{
	for (int size = 1; size <= 16; size *= 2) {
		for (int offset = 0; offset < 16; offset += size) {
			uint8_t __aligned(16) new[16] = {};
			uint8_t __aligned(16) old[16];
			bool succ;

			prepare_mem12();
			default_write_read(test->vcpu, test->vcpu, LOGICAL, 16, NO_KEY);

			memcpy(&old, mem1, 16);
			MOP(test->vm, ABSOLUTE, CMPXCHG, new + offset,
			    size, GADDR_V(mem1 + offset),
			    CMPXCHG_OLD(old + offset),
			    CMPXCHG_SUCCESS(&succ), KEY(key));
			HOST_SYNC(test->vcpu, STAGE_COPIED);
			MOP(test->vm, ABSOLUTE, READ, mem2, 16, GADDR_V(mem2));
			TEST_ASSERT(succ, "exchange of values should succeed");
			memcpy(mem1 + offset, new + offset, size);
			ASSERT_MEM_EQ(mem1, mem2, 16);

			memcpy(&old, mem1, 16);
			new[offset]++;
			old[offset]++;
			MOP(test->vm, ABSOLUTE, CMPXCHG, new + offset,
			    size, GADDR_V(mem1 + offset),
			    CMPXCHG_OLD(old + offset),
			    CMPXCHG_SUCCESS(&succ), KEY(key));
			HOST_SYNC(test->vcpu, STAGE_COPIED);
			MOP(test->vm, ABSOLUTE, READ, mem2, 16, GADDR_V(mem2));
			TEST_ASSERT(!succ, "exchange of values should not succeed");
			ASSERT_MEM_EQ(mem1, mem2, 16);
			ASSERT_MEM_EQ(&old, mem1, 16);
		}
	}
}

static void guest_copy(void)
{
	GUEST_SYNC(STAGE_INITED);
	memcpy(&mem2, &mem1, sizeof(mem2));
	GUEST_SYNC(STAGE_COPIED);
}

static void test_copy(void)
{
	struct test_default t = test_default_init(guest_copy);

	HOST_SYNC(t.vcpu, STAGE_INITED);

	default_write_read(t.vcpu, t.vcpu, LOGICAL, t.size, NO_KEY);

	kvm_vm_free(t.kvm_vm);
}

static void test_copy_access_register(void)
{
	struct test_default t = test_default_init(guest_copy);

	HOST_SYNC(t.vcpu, STAGE_INITED);

	prepare_mem12();
	t.run->psw_mask &= ~(3UL << (63 - 17));
	t.run->psw_mask |= 1UL << (63 - 17);  /* Enable AR mode */

	/*
	 * Primary address space gets used if an access register
	 * contains zero. The host makes use of AR[1], so it is a good
	 * candidate for ensuring the guest AR (of zero) is used.
	 */
	CHECK_N_DO(MOP, t.vcpu, LOGICAL, WRITE, mem1, t.size,
		   GADDR_V(mem1), AR(1));
	HOST_SYNC(t.vcpu, STAGE_COPIED);

	CHECK_N_DO(MOP, t.vcpu, LOGICAL, READ, mem2, t.size,
		   GADDR_V(mem2), AR(1));
	ASSERT_MEM_EQ(mem1, mem2, t.size);

	kvm_vm_free(t.kvm_vm);
}

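/*
 * A storage key consists of the access-control bits (ACC, bits 0-3), the
 * fetch-protection bit (F, bit 4) and the reference and change bits. So a key
 * of 0x90 is ACC 9 with fetch protection off, 0x98 the same key with fetch
 * protection on.
 */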
static void set_storage_key_range(void *addr, size_t len, uint8_t key)
{
	uintptr_t _addr, abs, i;
	int not_mapped = 0;

	_addr = (uintptr_t)addr;
	for (i = _addr & PAGE_MASK; i < _addr + len; i += PAGE_SIZE) {
		abs = i;
		asm volatile (
			       "lra	%[abs], 0(0,%[abs])\n"
			"	jz	0f\n"
			"	llill	%[not_mapped],1\n"
			"	j	1f\n"
			"0:	sske	%[key], %[abs]\n"
			"1:"
			: [abs] "+&a" (abs), [not_mapped] "+r" (not_mapped)
			: [key] "r" (key)
			: "cc"
		);
		GUEST_ASSERT_EQ(not_mapped, 0);
	}
}

static void guest_copy_key(void)
{
	set_storage_key_range(mem1, sizeof(mem1), 0x90);
	set_storage_key_range(mem2, sizeof(mem2), 0x90);
	GUEST_SYNC(STAGE_SKEYS_SET);

	for (;;) {
		memcpy(&mem2, &mem1, sizeof(mem2));
		GUEST_SYNC(STAGE_COPIED);
	}
}

static void test_copy_key(void)
{
	struct test_default t = test_default_init(guest_copy_key);

	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vm, no key */
	default_write_read(t.vcpu, t.vm, ABSOLUTE, t.size, NO_KEY);

	/* vm/vcpu, matching key or key 0 */
	default_write_read(t.vcpu, t.vcpu, LOGICAL, t.size, 0);
	default_write_read(t.vcpu, t.vcpu, LOGICAL, t.size, 9);
	default_write_read(t.vcpu, t.vm, ABSOLUTE, t.size, 0);
	default_write_read(t.vcpu, t.vm, ABSOLUTE, t.size, 9);
	/*
	 * There used to be different code paths for key handling depending on
	 * whether the region crossed a page boundary.
	 * There currently are not, but the more tests the merrier.
	 */
	default_write_read(t.vcpu, t.vcpu, LOGICAL, 1, 0);
	default_write_read(t.vcpu, t.vcpu, LOGICAL, 1, 9);
	default_write_read(t.vcpu, t.vm, ABSOLUTE, 1, 0);
	default_write_read(t.vcpu, t.vm, ABSOLUTE, 1, 9);

	/* vm/vcpu, mismatching keys on read, but no fetch protection */
	default_read(t.vcpu, t.vcpu, LOGICAL, t.size, 2);
	default_read(t.vcpu, t.vm, ABSOLUTE, t.size, 2);

	kvm_vm_free(t.kvm_vm);
}

static void test_cmpxchg_key(void)
{
	struct test_default t = test_default_init(guest_copy_key);

	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	default_cmpxchg(&t, NO_KEY);
	default_cmpxchg(&t, 0);
	default_cmpxchg(&t, 9);

	kvm_vm_free(t.kvm_vm);
}

static __uint128_t cut_to_size(int size, __uint128_t val)
{
	switch (size) {
	case 1:
		return (uint8_t)val;
	case 2:
		return (uint16_t)val;
	case 4:
		return (uint32_t)val;
	case 8:
		return (uint64_t)val;
	case 16:
		return val;
	}
	GUEST_FAIL("Invalid size = %u", size);
	return 0;
}

static bool popcount_eq(__uint128_t a, __uint128_t b)
{
	unsigned int count_a, count_b;

	count_a = __builtin_popcountl((uint64_t)(a >> 64)) +
		  __builtin_popcountl((uint64_t)a);
	count_b = __builtin_popcountl((uint64_t)(b >> 64)) +
		  __builtin_popcountl((uint64_t)b);
	return count_a == count_b;
}

static __uint128_t rotate(int size, __uint128_t val, int amount)
{
	unsigned int bits = size * 8;

	amount = (amount + bits) % bits;
	val = cut_to_size(size, val);
	if (!amount)
		return val;
	return (val << (bits - amount)) | (val >> amount);
}

const unsigned int max_block = 16;

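/*
 * Pseudo-randomly pick an aligned block within the first max_block bytes.
 * The guest side only uses sizes it can compare-and-swap natively (4, 8, 16),
 * the host side exercises all power-of-two sizes from 1 to 16.
 */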
static void choose_block(bool guest, int i, int *size, int *offset)
{
	unsigned int rand;

	rand = i;
	if (guest) {
		rand = rand * 19 + 11;
		*size = 1 << ((rand % 3) + 2);
		rand = rand * 19 + 11;
		*offset = (rand % max_block) & ~(*size - 1);
	} else {
		rand = rand * 17 + 5;
		*size = 1 << (rand % 5);
		rand = rand * 17 + 5;
		*offset = (rand % max_block) & ~(*size - 1);
	}
}

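/*
 * Mutate @old by either swapping two of its bytes or rotating it. Both
 * operations preserve the total number of set bits, which is the invariant
 * the concurrent cmpxchg test checks via popcount_eq() at the end.
 */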
static __uint128_t permutate_bits(bool guest, int i, int size, __uint128_t old)
{
	unsigned int rand;
	int amount;
	bool swap;

	rand = i;
	rand = rand * 3 + 1;
	if (guest)
		rand = rand * 3 + 1;
	swap = rand % 2 == 0;
	if (swap) {
		int i, j;
		__uint128_t new;
		uint8_t byte0, byte1;

		rand = rand * 3 + 1;
		i = rand % size;
		rand = rand * 3 + 1;
		j = rand % size;
		if (i == j)
			return old;
		new = rotate(16, old, i * 8);
		byte0 = new & 0xff;
		new &= ~0xff;
		new = rotate(16, new, -i * 8);
		new = rotate(16, new, j * 8);
		byte1 = new & 0xff;
		new = (new & ~0xff) | byte0;
		new = rotate(16, new, -j * 8);
		new = rotate(16, new, i * 8);
		new = new | byte1;
		new = rotate(16, new, -i * 8);
		return new;
	}
	rand = rand * 3 + 1;
	amount = rand % (size * 8);
	return rotate(size, old, amount);
}

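/*
 * Native compare-and-swap wrapper around the CS/CSG/CDSG instructions.
 * Mirroring the MEM_OP cmpxchg, a mismatch returns false and leaves the
 * current memory contents in *old_addr.
 */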
static bool _cmpxchg(int size, void *target, __uint128_t *old_addr, __uint128_t new)
{
	bool ret;

	switch (size) {
	case 4: {
			uint32_t old = *old_addr;

			asm volatile ("cs %[old],%[new],%[address]"
			    : [old] "+d" (old),
			      [address] "+Q" (*(uint32_t *)(target))
			    : [new] "d" ((uint32_t)new)
			    : "cc"
			);
			ret = old == (uint32_t)*old_addr;
			*old_addr = old;
			return ret;
		}
	case 8: {
			uint64_t old = *old_addr;

			asm volatile ("csg %[old],%[new],%[address]"
			    : [old] "+d" (old),
			      [address] "+Q" (*(uint64_t *)(target))
			    : [new] "d" ((uint64_t)new)
			    : "cc"
			);
			ret = old == (uint64_t)*old_addr;
			*old_addr = old;
			return ret;
		}
	case 16: {
			__uint128_t old = *old_addr;

			asm volatile ("cdsg %[old],%[new],%[address]"
			    : [old] "+d" (old),
			      [address] "+Q" (*(__uint128_t *)(target))
			    : [new] "d" (new)
			    : "cc"
			);
			ret = old == *old_addr;
			*old_addr = old;
			return ret;
		}
	}
	GUEST_FAIL("Invalid size = %u", size);
	return 0;
}

const unsigned int cmpxchg_iter_outer = 100, cmpxchg_iter_inner = 10000;

static void guest_cmpxchg_key(void)
{
	int size, offset;
	__uint128_t old, new;

	set_storage_key_range(mem1, max_block, 0x10);
	set_storage_key_range(mem2, max_block, 0x10);
	GUEST_SYNC(STAGE_SKEYS_SET);

	for (int i = 0; i < cmpxchg_iter_outer; i++) {
		do {
			old = 1;
		} while (!_cmpxchg(16, mem1, &old, 0));
		for (int j = 0; j < cmpxchg_iter_inner; j++) {
			choose_block(true, i + j, &size, &offset);
			do {
				new = permutate_bits(true, i + j, size, old);
			} while (!_cmpxchg(size, mem2 + offset, &old, new));
		}
	}

	GUEST_SYNC(STAGE_DONE);
}

static void *run_guest(void *data)
{
	struct test_info *info = data;

	HOST_SYNC(*info, STAGE_DONE);
	return NULL;
}

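/*
 * s390x is big endian, so the low-order @size bytes of a __uint128_t are its
 * last bytes; return a pointer to them.
 */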
static char *quad_to_char(__uint128_t *quad, int size)
{
	return ((char *)quad) + (sizeof(*quad) - size);
}

static void test_cmpxchg_key_concurrent(void)
{
	struct test_default t = test_default_init(guest_cmpxchg_key);
	int size, offset;
	__uint128_t old, new;
	bool success;
	pthread_t thread;

	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);
	prepare_mem12();
	MOP(t.vcpu, LOGICAL, WRITE, mem1, max_block, GADDR_V(mem2));
	pthread_create(&thread, NULL, run_guest, &t.vcpu);

	for (int i = 0; i < cmpxchg_iter_outer; i++) {
		do {
			old = 0;
			new = 1;
			MOP(t.vm, ABSOLUTE, CMPXCHG, &new,
			    sizeof(new), GADDR_V(mem1),
			    CMPXCHG_OLD(&old),
			    CMPXCHG_SUCCESS(&success), KEY(1));
		} while (!success);
		for (int j = 0; j < cmpxchg_iter_inner; j++) {
			choose_block(false, i + j, &size, &offset);
			do {
				new = permutate_bits(false, i + j, size, old);
				MOP(t.vm, ABSOLUTE, CMPXCHG, quad_to_char(&new, size),
				    size, GADDR_V(mem2 + offset),
				    CMPXCHG_OLD(quad_to_char(&old, size)),
				    CMPXCHG_SUCCESS(&success), KEY(1));
			} while (!success);
		}
	}

	pthread_join(thread, NULL);

	MOP(t.vcpu, LOGICAL, READ, mem2, max_block, GADDR_V(mem2));
	TEST_ASSERT(popcount_eq(*(__uint128_t *)mem1, *(__uint128_t *)mem2),
		    "Must retain number of set bits");

	kvm_vm_free(t.kvm_vm);
}

static void guest_copy_key_fetch_prot(void)
{
	/*
	 * For some reason combining the first sync with override enablement
	 * results in an exception when calling HOST_SYNC.
	 */
	GUEST_SYNC(STAGE_INITED);
	/* Storage protection override applies to both store and fetch. */
	set_storage_key_range(mem1, sizeof(mem1), 0x98);
	set_storage_key_range(mem2, sizeof(mem2), 0x98);
	GUEST_SYNC(STAGE_SKEYS_SET);

	for (;;) {
		memcpy(&mem2, &mem1, sizeof(mem2));
		GUEST_SYNC(STAGE_COPIED);
	}
}

static void test_copy_key_storage_prot_override(void)
{
	struct test_default t = test_default_init(guest_copy_key_fetch_prot);

	HOST_SYNC(t.vcpu, STAGE_INITED);
	t.run->s.regs.crs[0] |= CR0_STORAGE_PROTECTION_OVERRIDE;
	t.run->kvm_dirty_regs = KVM_SYNC_CRS;
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vcpu, mismatching keys, storage protection override in effect */
	default_write_read(t.vcpu, t.vcpu, LOGICAL, t.size, 2);

	kvm_vm_free(t.kvm_vm);
}

static void test_copy_key_fetch_prot(void)
{
	struct test_default t = test_default_init(guest_copy_key_fetch_prot);

	HOST_SYNC(t.vcpu, STAGE_INITED);
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vm/vcpu, matching key, fetch protection in effect */
	default_read(t.vcpu, t.vcpu, LOGICAL, t.size, 9);
	default_read(t.vcpu, t.vm, ABSOLUTE, t.size, 9);

	kvm_vm_free(t.kvm_vm);
}

#define ERR_PROT_MOP(...)							\
({										\
	int rv;									\
										\
	rv = ERR_MOP(__VA_ARGS__);						\
	TEST_ASSERT(rv == 4, "Should result in protection exception");		\
})

static void guest_error_key(void)
{
	GUEST_SYNC(STAGE_INITED);
	set_storage_key_range(mem1, PAGE_SIZE, 0x18);
	set_storage_key_range(mem1 + PAGE_SIZE, sizeof(mem1) - PAGE_SIZE, 0x98);
	GUEST_SYNC(STAGE_SKEYS_SET);
	GUEST_SYNC(STAGE_IDLED);
}

static void test_errors_key(void)
{
	struct test_default t = test_default_init(guest_error_key);

	HOST_SYNC(t.vcpu, STAGE_INITED);
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vm/vcpu, mismatching keys, fetch protection in effect */
	CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, WRITE, mem1, t.size, GADDR_V(mem1), KEY(2));
	CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, t.size, GADDR_V(mem1), KEY(2));
	CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, WRITE, mem1, t.size, GADDR_V(mem1), KEY(2));
	CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, READ, mem2, t.size, GADDR_V(mem1), KEY(2));

	kvm_vm_free(t.kvm_vm);
}

static void test_errors_cmpxchg_key(void)
{
	struct test_default t = test_default_init(guest_copy_key_fetch_prot);
	int i;

	HOST_SYNC(t.vcpu, STAGE_INITED);
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	for (i = 1; i <= 16; i *= 2) {
		__uint128_t old = 0;

		ERR_PROT_MOP(t.vm, ABSOLUTE, CMPXCHG, mem2, i, GADDR_V(mem2),
			     CMPXCHG_OLD(&old), KEY(2));
	}

	kvm_vm_free(t.kvm_vm);
}

static void test_termination(void)
{
	struct test_default t = test_default_init(guest_error_key);
	uint64_t prefix;
	uint64_t teid;
	uint64_t teid_mask = BIT(63 - 56) | BIT(63 - 60) | BIT(63 - 61);
	uint64_t psw[2];

	HOST_SYNC(t.vcpu, STAGE_INITED);
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vcpu, mismatching keys after first page */
	ERR_PROT_MOP(t.vcpu, LOGICAL, WRITE, mem1, t.size, GADDR_V(mem1), KEY(1), INJECT);
	/*
	 * The memop injected a program exception and the test needs to check the
	 * Translation-Exception Identification (TEID). It is necessary to run
	 * the guest in order to be able to read the TEID from guest memory.
	 * Set the guest program new PSW, so the guest state is not clobbered.
	 */
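	/* The program new PSW lives at lowcore offset 464 (0x1d0), the TEID at 168 (0xa8). */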
	prefix = t.run->s.regs.prefix;
	psw[0] = t.run->psw_mask;
	psw[1] = t.run->psw_addr;
	MOP(t.vm, ABSOLUTE, WRITE, psw, sizeof(psw), GADDR(prefix + 464));
	HOST_SYNC(t.vcpu, STAGE_IDLED);
	MOP(t.vm, ABSOLUTE, READ, &teid, sizeof(teid), GADDR(prefix + 168));
	/* Bits 56, 60, 61 form a code, 0 being the only one allowing for termination */
	TEST_ASSERT_EQ(teid & teid_mask, 0);

	kvm_vm_free(t.kvm_vm);
}

static void test_errors_key_storage_prot_override(void)
{
	struct test_default t = test_default_init(guest_copy_key_fetch_prot);

	HOST_SYNC(t.vcpu, STAGE_INITED);
	t.run->s.regs.crs[0] |= CR0_STORAGE_PROTECTION_OVERRIDE;
	t.run->kvm_dirty_regs = KVM_SYNC_CRS;
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vm, mismatching keys, storage protection override not applicable to vm */
	CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, WRITE, mem1, t.size, GADDR_V(mem1), KEY(2));
	CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, READ, mem2, t.size, GADDR_V(mem2), KEY(2));

	kvm_vm_free(t.kvm_vm);
}

const uint64_t last_page_addr = -PAGE_SIZE;

static void guest_copy_key_fetch_prot_override(void)
{
	int i;
	char *page_0 = 0;

	GUEST_SYNC(STAGE_INITED);
	set_storage_key_range(0, PAGE_SIZE, 0x18);
	set_storage_key_range((void *)last_page_addr, PAGE_SIZE, 0x0);
	asm volatile ("sske %[key],%[addr]\n" :: [addr] "r"(0L), [key] "r"(0x18) : "cc");
	GUEST_SYNC(STAGE_SKEYS_SET);

	for (;;) {
		for (i = 0; i < PAGE_SIZE; i++)
			page_0[i] = mem1[i];
		GUEST_SYNC(STAGE_COPIED);
	}
}

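/*
 * Fetch-protection override only applies to effective addresses 0 through
 * 2047, which is why the tests below read exactly 2048 bytes starting at
 * address 0 (or wrapping around to it) and the later error checks read one
 * byte more.
 */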
static void test_copy_key_fetch_prot_override(void)
{
	struct test_default t = test_default_init(guest_copy_key_fetch_prot_override);
	vm_vaddr_t guest_0_page, guest_last_page;

	guest_0_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, 0);
	guest_last_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr);
	if (guest_0_page != 0 || guest_last_page != last_page_addr) {
		print_skip("did not allocate guest pages at required positions");
		goto out;
	}

	HOST_SYNC(t.vcpu, STAGE_INITED);
	t.run->s.regs.crs[0] |= CR0_FETCH_PROTECTION_OVERRIDE;
	t.run->kvm_dirty_regs = KVM_SYNC_CRS;
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vcpu, mismatching keys on fetch, fetch protection override applies */
	prepare_mem12();
	MOP(t.vcpu, LOGICAL, WRITE, mem1, PAGE_SIZE, GADDR_V(mem1));
	HOST_SYNC(t.vcpu, STAGE_COPIED);
	CHECK_N_DO(MOP, t.vcpu, LOGICAL, READ, mem2, 2048, GADDR_V(guest_0_page), KEY(2));
	ASSERT_MEM_EQ(mem1, mem2, 2048);

	/*
	 * vcpu, mismatching keys on fetch, fetch protection override applies,
	 * wraparound
	 */
	prepare_mem12();
	MOP(t.vcpu, LOGICAL, WRITE, mem1, 2 * PAGE_SIZE, GADDR_V(guest_last_page));
	HOST_SYNC(t.vcpu, STAGE_COPIED);
	CHECK_N_DO(MOP, t.vcpu, LOGICAL, READ, mem2, PAGE_SIZE + 2048,
		   GADDR_V(guest_last_page), KEY(2));
	ASSERT_MEM_EQ(mem1, mem2, 2048);

out:
	kvm_vm_free(t.kvm_vm);
}

static void test_errors_key_fetch_prot_override_not_enabled(void)
{
	struct test_default t = test_default_init(guest_copy_key_fetch_prot_override);
	vm_vaddr_t guest_0_page, guest_last_page;

	guest_0_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, 0);
	guest_last_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr);
	if (guest_0_page != 0 || guest_last_page != last_page_addr) {
		print_skip("did not allocate guest pages at required positions");
		goto out;
	}
	HOST_SYNC(t.vcpu, STAGE_INITED);
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vcpu, mismatching keys on fetch, fetch protection override not enabled */
	CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, 2048, GADDR_V(0), KEY(2));

out:
	kvm_vm_free(t.kvm_vm);
}

static void test_errors_key_fetch_prot_override_enabled(void)
{
	struct test_default t = test_default_init(guest_copy_key_fetch_prot_override);
	vm_vaddr_t guest_0_page, guest_last_page;

	guest_0_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, 0);
	guest_last_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr);
	if (guest_0_page != 0 || guest_last_page != last_page_addr) {
		print_skip("did not allocate guest pages at required positions");
		goto out;
	}
	HOST_SYNC(t.vcpu, STAGE_INITED);
	t.run->s.regs.crs[0] |= CR0_FETCH_PROTECTION_OVERRIDE;
	t.run->kvm_dirty_regs = KVM_SYNC_CRS;
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/*
	 * vcpu, mismatching keys on fetch,
	 * fetch protection override does not apply because memory range exceeded
	 */
	CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, 2048 + 1, GADDR_V(0), KEY(2));
	CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, PAGE_SIZE + 2048 + 1,
		   GADDR_V(guest_last_page), KEY(2));
	/* vm, fetch protection override does not apply */
	CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, READ, mem2, 2048, GADDR(0), KEY(2));
	CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, READ, mem2, 2048, GADDR_V(guest_0_page), KEY(2));

out:
	kvm_vm_free(t.kvm_vm);
}

static void guest_idle(void)
{
	GUEST_SYNC(STAGE_INITED); /* for consistency's sake */
	for (;;)
		GUEST_SYNC(STAGE_IDLED);
}

static void _test_errors_common(struct test_info info, enum mop_target target, int size)
{
	int rv;

	/* Bad size: */
	rv = ERR_MOP(info, target, WRITE, mem1, -1, GADDR_V(mem1));
	TEST_ASSERT(rv == -1 && errno == E2BIG, "ioctl allows insane sizes");

	/* Zero size: */
	rv = ERR_MOP(info, target, WRITE, mem1, 0, GADDR_V(mem1));
	TEST_ASSERT(rv == -1 && (errno == EINVAL || errno == ENOMEM),
		    "ioctl allows 0 as size");

	/* Bad flags: */
	rv = ERR_MOP(info, target, WRITE, mem1, size, GADDR_V(mem1), SET_FLAGS(-1));
	TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows all flags");

	/* Bad guest address: */
	rv = ERR_MOP(info, target, WRITE, mem1, size, GADDR((void *)~0xfffUL), CHECK_ONLY);
	TEST_ASSERT(rv > 0, "ioctl does not report bad guest memory address with CHECK_ONLY");
	rv = ERR_MOP(info, target, WRITE, mem1, size, GADDR((void *)~0xfffUL));
	TEST_ASSERT(rv > 0, "ioctl does not report bad guest memory address on write");

	/* Bad host address: */
	rv = ERR_MOP(info, target, WRITE, 0, size, GADDR_V(mem1));
	TEST_ASSERT(rv == -1 && errno == EFAULT,
		    "ioctl does not report bad host memory address");

	/* Bad key: */
	rv = ERR_MOP(info, target, WRITE, mem1, size, GADDR_V(mem1), KEY(17));
	TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows invalid key");
}

static void test_errors(void)
{
	struct test_default t = test_default_init(guest_idle);
	int rv;

	HOST_SYNC(t.vcpu, STAGE_INITED);

	_test_errors_common(t.vcpu, LOGICAL, t.size);
	_test_errors_common(t.vm, ABSOLUTE, t.size);

	/* Bad operation: */
	rv = ERR_MOP(t.vcpu, INVALID, WRITE, mem1, t.size, GADDR_V(mem1));
	TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows bad operations");
	/* virtual addresses are not translated when passing INVALID */
	rv = ERR_MOP(t.vm, INVALID, WRITE, mem1, PAGE_SIZE, GADDR(0));
	TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows bad operations");

	/* Bad access register: */
	t.run->psw_mask &= ~(3UL << (63 - 17));
	t.run->psw_mask |= 1UL << (63 - 17);  /* Enable AR mode */
	HOST_SYNC(t.vcpu, STAGE_IDLED); /* To sync new state to SIE block */
	rv = ERR_MOP(t.vcpu, LOGICAL, WRITE, mem1, t.size, GADDR_V(mem1), AR(17));
	TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows ARs > 15");
	t.run->psw_mask &= ~(3UL << (63 - 17));   /* Disable AR mode */
	HOST_SYNC(t.vcpu, STAGE_IDLED); /* Run to sync new state */

	/* Check that the SIDA calls are rejected for non-protected guests */
	rv = ERR_MOP(t.vcpu, SIDA, READ, mem1, 8, GADDR(0), SIDA_OFFSET(0x1c0));
	TEST_ASSERT(rv == -1 && errno == EINVAL,
		    "ioctl does not reject SIDA_READ in non-protected mode");
	rv = ERR_MOP(t.vcpu, SIDA, WRITE, mem1, 8, GADDR(0), SIDA_OFFSET(0x1c0));
	TEST_ASSERT(rv == -1 && errno == EINVAL,
		    "ioctl does not reject SIDA_WRITE in non-protected mode");

	kvm_vm_free(t.kvm_vm);
}

static void test_errors_cmpxchg(void)
{
	struct test_default t = test_default_init(guest_idle);
	__uint128_t old;
	int rv, i, power = 1;

	HOST_SYNC(t.vcpu, STAGE_INITED);

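	/* Every size in [0, 32) that is not a power of two must be rejected. */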
	for (i = 0; i < 32; i++) {
		if (i == power) {
			power *= 2;
			continue;
		}
		rv = ERR_MOP(t.vm, ABSOLUTE, CMPXCHG, mem1, i, GADDR_V(mem1),
			     CMPXCHG_OLD(&old));
		TEST_ASSERT(rv == -1 && errno == EINVAL,
			    "ioctl allows bad size for cmpxchg");
	}
	for (i = 1; i <= 16; i *= 2) {
		rv = ERR_MOP(t.vm, ABSOLUTE, CMPXCHG, mem1, i, GADDR((void *)~0xfffUL),
			     CMPXCHG_OLD(&old));
		TEST_ASSERT(rv > 0, "ioctl allows bad guest address for cmpxchg");
	}
	for (i = 2; i <= 16; i *= 2) {
		rv = ERR_MOP(t.vm, ABSOLUTE, CMPXCHG, mem1, i, GADDR_V(mem1 + 1),
			     CMPXCHG_OLD(&old));
		TEST_ASSERT(rv == -1 && errno == EINVAL,
			    "ioctl allows bad alignment for cmpxchg");
	}

	kvm_vm_free(t.kvm_vm);
}

int main(int argc, char *argv[])
{
	int extension_cap, idx;

	TEST_REQUIRE(kvm_has_cap(KVM_CAP_S390_MEM_OP));
	extension_cap = kvm_check_cap(KVM_CAP_S390_MEM_OP_EXTENSION);
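	/*
	 * The extension capability is a bit mask: the storage-key tests below
	 * only need it to be non-zero, the cmpxchg tests additionally require
	 * bit 0x2.
	 */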

	struct testdef {
		const char *name;
		void (*test)(void);
		bool requirements_met;
	} testlist[] = {
		{
			.name = "simple copy",
			.test = test_copy,
			.requirements_met = true,
		},
		{
			.name = "generic error checks",
			.test = test_errors,
			.requirements_met = true,
		},
		{
			.name = "copy with storage keys",
			.test = test_copy_key,
			.requirements_met = extension_cap > 0,
		},
		{
			.name = "cmpxchg with storage keys",
			.test = test_cmpxchg_key,
			.requirements_met = extension_cap & 0x2,
		},
		{
			.name = "concurrently cmpxchg with storage keys",
			.test = test_cmpxchg_key_concurrent,
			.requirements_met = extension_cap & 0x2,
		},
		{
			.name = "copy with key storage protection override",
			.test = test_copy_key_storage_prot_override,
			.requirements_met = extension_cap > 0,
		},
		{
			.name = "copy with key fetch protection",
			.test = test_copy_key_fetch_prot,
			.requirements_met = extension_cap > 0,
		},
		{
			.name = "copy with key fetch protection override",
			.test = test_copy_key_fetch_prot_override,
			.requirements_met = extension_cap > 0,
		},
		{
			.name = "copy with access register mode",
			.test = test_copy_access_register,
			.requirements_met = true,
		},
		{
			.name = "error checks with key",
			.test = test_errors_key,
			.requirements_met = extension_cap > 0,
		},
		{
			.name = "error checks for cmpxchg with key",
			.test = test_errors_cmpxchg_key,
			.requirements_met = extension_cap & 0x2,
		},
		{
			.name = "error checks for cmpxchg",
			.test = test_errors_cmpxchg,
			.requirements_met = extension_cap & 0x2,
		},
		{
			.name = "termination",
			.test = test_termination,
			.requirements_met = extension_cap > 0,
		},
		{
			.name = "error checks with key storage protection override",
			.test = test_errors_key_storage_prot_override,
			.requirements_met = extension_cap > 0,
		},
		{
			.name = "error checks without key fetch prot override",
			.test = test_errors_key_fetch_prot_override_not_enabled,
			.requirements_met = extension_cap > 0,
		},
		{
			.name = "error checks with key fetch prot override",
			.test = test_errors_key_fetch_prot_override_enabled,
			.requirements_met = extension_cap > 0,
		},
	};

	ksft_print_header();
	ksft_set_plan(ARRAY_SIZE(testlist));

	for (idx = 0; idx < ARRAY_SIZE(testlist); idx++) {
		if (testlist[idx].requirements_met) {
			testlist[idx].test();
			ksft_test_result_pass("%s\n", testlist[idx].name);
		} else {
			ksft_test_result_skip("%s - requirements not met (kernel has extension cap %#x)\n",
					      testlist[idx].name, extension_cap);
		}
	}

	ksft_finished();	/* Print results and exit() accordingly */
}