1// SPDX-License-Identifier: GPL-2.0-only
2/* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES */
3#include <stdlib.h>
4#include <sys/mman.h>
5#include <sys/eventfd.h>
6
7#define __EXPORTED_HEADERS__
8#include <linux/vfio.h>
9
10#include "iommufd_utils.h"
11
12static unsigned long HUGEPAGE_SIZE;
13
14#define MOCK_PAGE_SIZE (PAGE_SIZE / 2)
15#define MOCK_HUGE_PAGE_SIZE (512 * MOCK_PAGE_SIZE)
16
17static unsigned long get_huge_page_size(void)
18{
19	char buf[80];
20	int ret;
21	int fd;
22
23	fd = open("/sys/kernel/mm/transparent_hugepage/hpage_pmd_size",
24		  O_RDONLY);
25	if (fd < 0)
26		return 2 * 1024 * 1024;
27
28	ret = read(fd, buf, sizeof(buf));
29	close(fd);
30	if (ret <= 0 || ret == sizeof(buf))
31		return 2 * 1024 * 1024;
32	buf[ret] = 0;
33	return strtoul(buf, NULL, 10);
34}
35
36static __attribute__((constructor)) void setup_sizes(void)
37{
38	void *vrc;
39	int rc;
40
41	PAGE_SIZE = sysconf(_SC_PAGE_SIZE);
42	HUGEPAGE_SIZE = get_huge_page_size();
43
44	BUFFER_SIZE = PAGE_SIZE * 16;
45	rc = posix_memalign(&buffer, HUGEPAGE_SIZE, BUFFER_SIZE);
46	assert(!rc);
47	assert(buffer);
48	assert((uintptr_t)buffer % HUGEPAGE_SIZE == 0);
49	vrc = mmap(buffer, BUFFER_SIZE, PROT_READ | PROT_WRITE,
50		   MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
51	assert(vrc == buffer);
52}
53
54FIXTURE(iommufd)
55{
56	int fd;
57};
58
59FIXTURE_SETUP(iommufd)
60{
61	self->fd = open("/dev/iommu", O_RDWR);
62	ASSERT_NE(-1, self->fd);
63}
64
65FIXTURE_TEARDOWN(iommufd)
66{
67	teardown_iommufd(self->fd, _metadata);
68}
69
70TEST_F(iommufd, simple_close)
71{
72}
73
74TEST_F(iommufd, cmd_fail)
75{
76	struct iommu_destroy cmd = { .size = sizeof(cmd), .id = 0 };
77
78	/* object id is invalid */
79	EXPECT_ERRNO(ENOENT, _test_ioctl_destroy(self->fd, 0));
80	/* Bad pointer */
81	EXPECT_ERRNO(EFAULT, ioctl(self->fd, IOMMU_DESTROY, NULL));
82	/* Unknown ioctl */
83	EXPECT_ERRNO(ENOTTY,
84		     ioctl(self->fd, _IO(IOMMUFD_TYPE, IOMMUFD_CMD_BASE - 1),
85			   &cmd));
86}
87
88TEST_F(iommufd, cmd_length)
89{
90#define TEST_LENGTH(_struct, _ioctl, _last)                              \
91	{                                                                \
92		size_t min_size = offsetofend(struct _struct, _last);    \
93		struct {                                                 \
94			struct _struct cmd;                              \
95			uint8_t extra;                                   \
96		} cmd = { .cmd = { .size = min_size - 1 },               \
97			  .extra = UINT8_MAX };                          \
98		int old_errno;                                           \
99		int rc;                                                  \
100									 \
101		EXPECT_ERRNO(EINVAL, ioctl(self->fd, _ioctl, &cmd));     \
102		cmd.cmd.size = sizeof(struct _struct) + 1;               \
103		EXPECT_ERRNO(E2BIG, ioctl(self->fd, _ioctl, &cmd));      \
104		cmd.cmd.size = sizeof(struct _struct);                   \
105		rc = ioctl(self->fd, _ioctl, &cmd);                      \
106		old_errno = errno;                                       \
107		cmd.cmd.size = sizeof(struct _struct) + 1;               \
108		cmd.extra = 0;                                           \
109		if (rc) {                                                \
110			EXPECT_ERRNO(old_errno,                          \
111				     ioctl(self->fd, _ioctl, &cmd));     \
112		} else {                                                 \
113			ASSERT_EQ(0, ioctl(self->fd, _ioctl, &cmd));     \
114		}                                                        \
115	}
116
117	TEST_LENGTH(iommu_destroy, IOMMU_DESTROY, id);
118	TEST_LENGTH(iommu_hw_info, IOMMU_GET_HW_INFO, __reserved);
119	TEST_LENGTH(iommu_hwpt_alloc, IOMMU_HWPT_ALLOC, __reserved);
120	TEST_LENGTH(iommu_hwpt_invalidate, IOMMU_HWPT_INVALIDATE, __reserved);
121	TEST_LENGTH(iommu_ioas_alloc, IOMMU_IOAS_ALLOC, out_ioas_id);
122	TEST_LENGTH(iommu_ioas_iova_ranges, IOMMU_IOAS_IOVA_RANGES,
123		    out_iova_alignment);
124	TEST_LENGTH(iommu_ioas_allow_iovas, IOMMU_IOAS_ALLOW_IOVAS,
125		    allowed_iovas);
126	TEST_LENGTH(iommu_ioas_map, IOMMU_IOAS_MAP, iova);
127	TEST_LENGTH(iommu_ioas_copy, IOMMU_IOAS_COPY, src_iova);
128	TEST_LENGTH(iommu_ioas_unmap, IOMMU_IOAS_UNMAP, length);
129	TEST_LENGTH(iommu_option, IOMMU_OPTION, val64);
130	TEST_LENGTH(iommu_vfio_ioas, IOMMU_VFIO_IOAS, __reserved);
131#undef TEST_LENGTH
132}
133
134TEST_F(iommufd, cmd_ex_fail)
135{
136	struct {
137		struct iommu_destroy cmd;
138		__u64 future;
139	} cmd = { .cmd = { .size = sizeof(cmd), .id = 0 } };
140
141	/* object id is invalid and command is longer */
142	EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_DESTROY, &cmd));
143	/* future area is non-zero */
144	cmd.future = 1;
145	EXPECT_ERRNO(E2BIG, ioctl(self->fd, IOMMU_DESTROY, &cmd));
146	/* Original command "works" */
147	cmd.cmd.size = sizeof(cmd.cmd);
148	EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_DESTROY, &cmd));
149	/* Short command fails */
150	cmd.cmd.size = sizeof(cmd.cmd) - 1;
151	EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_DESTROY, &cmd));
152}
153
154TEST_F(iommufd, global_options)
155{
156	struct iommu_option cmd = {
157		.size = sizeof(cmd),
158		.option_id = IOMMU_OPTION_RLIMIT_MODE,
159		.op = IOMMU_OPTION_OP_GET,
160		.val64 = 1,
161	};
162
163	cmd.option_id = IOMMU_OPTION_RLIMIT_MODE;
164	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
165	ASSERT_EQ(0, cmd.val64);
166
167	/* This requires root */
168	cmd.op = IOMMU_OPTION_OP_SET;
169	cmd.val64 = 1;
170	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
171	cmd.val64 = 2;
172	EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_OPTION, &cmd));
173
174	cmd.op = IOMMU_OPTION_OP_GET;
175	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
176	ASSERT_EQ(1, cmd.val64);
177
178	cmd.op = IOMMU_OPTION_OP_SET;
179	cmd.val64 = 0;
180	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
181
182	cmd.op = IOMMU_OPTION_OP_GET;
183	cmd.option_id = IOMMU_OPTION_HUGE_PAGES;
184	EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_OPTION, &cmd));
185	cmd.op = IOMMU_OPTION_OP_SET;
186	EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_OPTION, &cmd));
187}
188
189FIXTURE(iommufd_ioas)
190{
191	int fd;
192	uint32_t ioas_id;
193	uint32_t stdev_id;
194	uint32_t hwpt_id;
195	uint32_t device_id;
196	uint64_t base_iova;
197};
198
199FIXTURE_VARIANT(iommufd_ioas)
200{
201	unsigned int mock_domains;
202	unsigned int memory_limit;
203};
204
205FIXTURE_SETUP(iommufd_ioas)
206{
207	unsigned int i;
208
209
210	self->fd = open("/dev/iommu", O_RDWR);
211	ASSERT_NE(-1, self->fd);
212	test_ioctl_ioas_alloc(&self->ioas_id);
213
214	if (!variant->memory_limit) {
215		test_ioctl_set_default_memory_limit();
216	} else {
217		test_ioctl_set_temp_memory_limit(variant->memory_limit);
218	}
219
220	for (i = 0; i != variant->mock_domains; i++) {
221		test_cmd_mock_domain(self->ioas_id, &self->stdev_id,
222				     &self->hwpt_id, &self->device_id);
223		self->base_iova = MOCK_APERTURE_START;
224	}
225}
226
227FIXTURE_TEARDOWN(iommufd_ioas)
228{
229	test_ioctl_set_default_memory_limit();
230	teardown_iommufd(self->fd, _metadata);
231}
232
233FIXTURE_VARIANT_ADD(iommufd_ioas, no_domain)
234{
235};
236
237FIXTURE_VARIANT_ADD(iommufd_ioas, mock_domain)
238{
239	.mock_domains = 1,
240};
241
242FIXTURE_VARIANT_ADD(iommufd_ioas, two_mock_domain)
243{
244	.mock_domains = 2,
245};
246
247FIXTURE_VARIANT_ADD(iommufd_ioas, mock_domain_limit)
248{
249	.mock_domains = 1,
250	.memory_limit = 16,
251};
252
253TEST_F(iommufd_ioas, ioas_auto_destroy)
254{
255}
256
257TEST_F(iommufd_ioas, ioas_destroy)
258{
259	if (self->stdev_id) {
260		/* IOAS cannot be freed while a device has a HWPT using it */
261		EXPECT_ERRNO(EBUSY,
262			     _test_ioctl_destroy(self->fd, self->ioas_id));
263	} else {
264		/* Can allocate and manually free an IOAS table */
265		test_ioctl_destroy(self->ioas_id);
266	}
267}
268
269TEST_F(iommufd_ioas, alloc_hwpt_nested)
270{
271	const uint32_t min_data_len =
272		offsetofend(struct iommu_hwpt_selftest, iotlb);
273	struct iommu_hwpt_selftest data = {
274		.iotlb = IOMMU_TEST_IOTLB_DEFAULT,
275	};
276	struct iommu_hwpt_invalidate_selftest inv_reqs[2] = {};
277	uint32_t nested_hwpt_id[2] = {};
278	uint32_t num_inv;
279	uint32_t parent_hwpt_id = 0;
280	uint32_t parent_hwpt_id_not_work = 0;
281	uint32_t test_hwpt_id = 0;
282
283	if (self->device_id) {
284		/* Negative tests */
285		test_err_hwpt_alloc(ENOENT, self->ioas_id, self->device_id, 0,
286				    &test_hwpt_id);
287		test_err_hwpt_alloc(EINVAL, self->device_id, self->device_id, 0,
288				    &test_hwpt_id);
289
290		test_cmd_hwpt_alloc(self->device_id, self->ioas_id,
291				    IOMMU_HWPT_ALLOC_NEST_PARENT,
292				    &parent_hwpt_id);
293
294		test_cmd_hwpt_alloc(self->device_id, self->ioas_id, 0,
295				    &parent_hwpt_id_not_work);
296
297		/* Negative nested tests */
298		test_err_hwpt_alloc_nested(EINVAL, self->device_id,
299					   parent_hwpt_id, 0,
300					   &nested_hwpt_id[0],
301					   IOMMU_HWPT_DATA_NONE, &data,
302					   sizeof(data));
303		test_err_hwpt_alloc_nested(EOPNOTSUPP, self->device_id,
304					   parent_hwpt_id, 0,
305					   &nested_hwpt_id[0],
306					   IOMMU_HWPT_DATA_SELFTEST + 1, &data,
307					   sizeof(data));
308		test_err_hwpt_alloc_nested(EINVAL, self->device_id,
309					   parent_hwpt_id, 0,
310					   &nested_hwpt_id[0],
311					   IOMMU_HWPT_DATA_SELFTEST, &data,
312					   min_data_len - 1);
313		test_err_hwpt_alloc_nested(EFAULT, self->device_id,
314					   parent_hwpt_id, 0,
315					   &nested_hwpt_id[0],
316					   IOMMU_HWPT_DATA_SELFTEST, NULL,
317					   sizeof(data));
318		test_err_hwpt_alloc_nested(
319			EOPNOTSUPP, self->device_id, parent_hwpt_id,
320			IOMMU_HWPT_ALLOC_NEST_PARENT, &nested_hwpt_id[0],
321			IOMMU_HWPT_DATA_SELFTEST, &data, sizeof(data));
322		test_err_hwpt_alloc_nested(EINVAL, self->device_id,
323					   parent_hwpt_id_not_work, 0,
324					   &nested_hwpt_id[0],
325					   IOMMU_HWPT_DATA_SELFTEST, &data,
326					   sizeof(data));
327
328		/* Allocate two nested hwpts sharing one common parent hwpt */
329		test_cmd_hwpt_alloc_nested(self->device_id, parent_hwpt_id, 0,
330					   &nested_hwpt_id[0],
331					   IOMMU_HWPT_DATA_SELFTEST, &data,
332					   sizeof(data));
333		test_cmd_hwpt_alloc_nested(self->device_id, parent_hwpt_id, 0,
334					   &nested_hwpt_id[1],
335					   IOMMU_HWPT_DATA_SELFTEST, &data,
336					   sizeof(data));
337		test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[0],
338					      IOMMU_TEST_IOTLB_DEFAULT);
339		test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[1],
340					      IOMMU_TEST_IOTLB_DEFAULT);
341
342		/* Negative test: a nested hwpt on top of a nested hwpt */
343		test_err_hwpt_alloc_nested(EINVAL, self->device_id,
344					   nested_hwpt_id[0], 0, &test_hwpt_id,
345					   IOMMU_HWPT_DATA_SELFTEST, &data,
346					   sizeof(data));
347		/* Negative test: parent hwpt now cannot be freed */
348		EXPECT_ERRNO(EBUSY,
349			     _test_ioctl_destroy(self->fd, parent_hwpt_id));
350
351		/* hwpt_invalidate only supports a user-managed hwpt (nested) */
352		num_inv = 1;
353		test_err_hwpt_invalidate(ENOENT, parent_hwpt_id, inv_reqs,
354					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
355					 sizeof(*inv_reqs), &num_inv);
356		assert(!num_inv);
357
358		/* Check data_type by passing zero-length array */
359		num_inv = 0;
360		test_cmd_hwpt_invalidate(nested_hwpt_id[0], inv_reqs,
361					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
362					 sizeof(*inv_reqs), &num_inv);
363		assert(!num_inv);
364
365		/* Negative test: Invalid data_type */
366		num_inv = 1;
367		test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
368					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST_INVALID,
369					 sizeof(*inv_reqs), &num_inv);
370		assert(!num_inv);
371
372		/* Negative test: structure size sanity */
373		num_inv = 1;
374		test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
375					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
376					 sizeof(*inv_reqs) + 1, &num_inv);
377		assert(!num_inv);
378
379		num_inv = 1;
380		test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
381					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
382					 1, &num_inv);
383		assert(!num_inv);
384
385		/* Negative test: invalid flag is passed */
386		num_inv = 1;
387		inv_reqs[0].flags = 0xffffffff;
388		test_err_hwpt_invalidate(EOPNOTSUPP, nested_hwpt_id[0], inv_reqs,
389					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
390					 sizeof(*inv_reqs), &num_inv);
391		assert(!num_inv);
392
393		/* Negative test: invalid data_uptr when array is not empty */
394		num_inv = 1;
395		inv_reqs[0].flags = 0;
396		test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], NULL,
397					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
398					 sizeof(*inv_reqs), &num_inv);
399		assert(!num_inv);
400
401		/* Negative test: invalid entry_len when array is not empty */
402		num_inv = 1;
403		inv_reqs[0].flags = 0;
404		test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
405					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
406					 0, &num_inv);
407		assert(!num_inv);
408
409		/* Negative test: invalid iotlb_id */
410		num_inv = 1;
411		inv_reqs[0].flags = 0;
412		inv_reqs[0].iotlb_id = MOCK_NESTED_DOMAIN_IOTLB_ID_MAX + 1;
413		test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
414					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
415					 sizeof(*inv_reqs), &num_inv);
416		assert(!num_inv);
417
418		/*
419		 * Invalidate the 1st iotlb entry but fail the 2nd request
420		 * due to invalid flags configuration in the 2nd request.
421		 */
422		num_inv = 2;
423		inv_reqs[0].flags = 0;
424		inv_reqs[0].iotlb_id = 0;
425		inv_reqs[1].flags = 0xffffffff;
426		inv_reqs[1].iotlb_id = 1;
427		test_err_hwpt_invalidate(EOPNOTSUPP, nested_hwpt_id[0], inv_reqs,
428					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
429					 sizeof(*inv_reqs), &num_inv);
430		assert(num_inv == 1);
431		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 0, 0);
432		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 1,
433					  IOMMU_TEST_IOTLB_DEFAULT);
434		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 2,
435					  IOMMU_TEST_IOTLB_DEFAULT);
436		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 3,
437					  IOMMU_TEST_IOTLB_DEFAULT);
438
439		/*
440		 * Invalidate the 1st iotlb entry but fail the 2nd request
441		 * due to invalid iotlb_id configuration in the 2nd request.
442		 */
443		num_inv = 2;
444		inv_reqs[0].flags = 0;
445		inv_reqs[0].iotlb_id = 0;
446		inv_reqs[1].flags = 0;
447		inv_reqs[1].iotlb_id = MOCK_NESTED_DOMAIN_IOTLB_ID_MAX + 1;
448		test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
449					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
450					 sizeof(*inv_reqs), &num_inv);
451		assert(num_inv == 1);
452		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 0, 0);
453		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 1,
454					  IOMMU_TEST_IOTLB_DEFAULT);
455		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 2,
456					  IOMMU_TEST_IOTLB_DEFAULT);
457		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 3,
458					  IOMMU_TEST_IOTLB_DEFAULT);
459
460		/* Invalidate the 2nd iotlb entry and verify */
461		num_inv = 1;
462		inv_reqs[0].flags = 0;
463		inv_reqs[0].iotlb_id = 1;
464		test_cmd_hwpt_invalidate(nested_hwpt_id[0], inv_reqs,
465					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
466					 sizeof(*inv_reqs), &num_inv);
467		assert(num_inv == 1);
468		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 0, 0);
469		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 1, 0);
470		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 2,
471					  IOMMU_TEST_IOTLB_DEFAULT);
472		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 3,
473					  IOMMU_TEST_IOTLB_DEFAULT);
474
475		/* Invalidate the 3rd and 4th iotlb entries and verify */
476		num_inv = 2;
477		inv_reqs[0].flags = 0;
478		inv_reqs[0].iotlb_id = 2;
479		inv_reqs[1].flags = 0;
480		inv_reqs[1].iotlb_id = 3;
481		test_cmd_hwpt_invalidate(nested_hwpt_id[0], inv_reqs,
482					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
483					 sizeof(*inv_reqs), &num_inv);
484		assert(num_inv == 2);
485		test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[0], 0);
486
487		/* Invalidate all iotlb entries for nested_hwpt_id[1] and verify */
488		num_inv = 1;
489		inv_reqs[0].flags = IOMMU_TEST_INVALIDATE_FLAG_ALL;
490		test_cmd_hwpt_invalidate(nested_hwpt_id[1], inv_reqs,
491					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
492					 sizeof(*inv_reqs), &num_inv);
493		assert(num_inv == 1);
494		test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[1], 0);
495
496		/* Attach device to nested_hwpt_id[0] that then will be busy */
497		test_cmd_mock_domain_replace(self->stdev_id, nested_hwpt_id[0]);
498		EXPECT_ERRNO(EBUSY,
499			     _test_ioctl_destroy(self->fd, nested_hwpt_id[0]));
500
501		/* Switch from nested_hwpt_id[0] to nested_hwpt_id[1] */
502		test_cmd_mock_domain_replace(self->stdev_id, nested_hwpt_id[1]);
503		EXPECT_ERRNO(EBUSY,
504			     _test_ioctl_destroy(self->fd, nested_hwpt_id[1]));
505		test_ioctl_destroy(nested_hwpt_id[0]);
506
507		/* Detach from nested_hwpt_id[1] and destroy it */
508		test_cmd_mock_domain_replace(self->stdev_id, parent_hwpt_id);
509		test_ioctl_destroy(nested_hwpt_id[1]);
510
511		/* Detach from the parent hw_pagetable and destroy it */
512		test_cmd_mock_domain_replace(self->stdev_id, self->ioas_id);
513		test_ioctl_destroy(parent_hwpt_id);
514		test_ioctl_destroy(parent_hwpt_id_not_work);
515	} else {
516		test_err_hwpt_alloc(ENOENT, self->device_id, self->ioas_id, 0,
517				    &parent_hwpt_id);
518		test_err_hwpt_alloc_nested(ENOENT, self->device_id,
519					   parent_hwpt_id, 0,
520					   &nested_hwpt_id[0],
521					   IOMMU_HWPT_DATA_SELFTEST, &data,
522					   sizeof(data));
523		test_err_hwpt_alloc_nested(ENOENT, self->device_id,
524					   parent_hwpt_id, 0,
525					   &nested_hwpt_id[1],
526					   IOMMU_HWPT_DATA_SELFTEST, &data,
527					   sizeof(data));
528		test_err_mock_domain_replace(ENOENT, self->stdev_id,
529					     nested_hwpt_id[0]);
530		test_err_mock_domain_replace(ENOENT, self->stdev_id,
531					     nested_hwpt_id[1]);
532	}
533}
534
535TEST_F(iommufd_ioas, hwpt_attach)
536{
537	/* Create a device attached directly to a hwpt */
538	if (self->stdev_id) {
539		test_cmd_mock_domain(self->hwpt_id, NULL, NULL, NULL);
540	} else {
541		test_err_mock_domain(ENOENT, self->hwpt_id, NULL, NULL);
542	}
543}
544
545TEST_F(iommufd_ioas, ioas_area_destroy)
546{
547	/* Adding an area does not change ability to destroy */
548	test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, self->base_iova);
549	if (self->stdev_id)
550		EXPECT_ERRNO(EBUSY,
551			     _test_ioctl_destroy(self->fd, self->ioas_id));
552	else
553		test_ioctl_destroy(self->ioas_id);
554}
555
556TEST_F(iommufd_ioas, ioas_area_auto_destroy)
557{
558	int i;
559
560	/* Can allocate and automatically free an IOAS table with many areas */
561	for (i = 0; i != 10; i++) {
562		test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE,
563					  self->base_iova + i * PAGE_SIZE);
564	}
565}
566
567TEST_F(iommufd_ioas, get_hw_info)
568{
569	struct iommu_test_hw_info buffer_exact;
570	struct iommu_test_hw_info_buffer_larger {
571		struct iommu_test_hw_info info;
572		uint64_t trailing_bytes;
573	} buffer_larger;
574	struct iommu_test_hw_info_buffer_smaller {
575		__u32 flags;
576	} buffer_smaller;
577
578	if (self->device_id) {
579		/* Provide a zero-size user_buffer */
580		test_cmd_get_hw_info(self->device_id, NULL, 0);
581		/* Provide a user_buffer with exact size */
582		test_cmd_get_hw_info(self->device_id, &buffer_exact, sizeof(buffer_exact));
583		/*
584		 * Provide a user_buffer with size larger than the exact size to check if
585		 * kernel zero the trailing bytes.
586		 */
587		test_cmd_get_hw_info(self->device_id, &buffer_larger, sizeof(buffer_larger));
588		/*
589		 * Provide a user_buffer with size smaller than the exact size to check if
590		 * the fields within the size range still gets updated.
591		 */
592		test_cmd_get_hw_info(self->device_id, &buffer_smaller, sizeof(buffer_smaller));
593	} else {
594		test_err_get_hw_info(ENOENT, self->device_id,
595				     &buffer_exact, sizeof(buffer_exact));
596		test_err_get_hw_info(ENOENT, self->device_id,
597				     &buffer_larger, sizeof(buffer_larger));
598	}
599}
600
601TEST_F(iommufd_ioas, area)
602{
603	int i;
604
605	/* Unmap fails if nothing is mapped */
606	for (i = 0; i != 10; i++)
607		test_err_ioctl_ioas_unmap(ENOENT, i * PAGE_SIZE, PAGE_SIZE);
608
609	/* Unmap works */
610	for (i = 0; i != 10; i++)
611		test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE,
612					  self->base_iova + i * PAGE_SIZE);
613	for (i = 0; i != 10; i++)
614		test_ioctl_ioas_unmap(self->base_iova + i * PAGE_SIZE,
615				      PAGE_SIZE);
616
617	/* Split fails */
618	test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE * 2,
619				  self->base_iova + 16 * PAGE_SIZE);
620	test_err_ioctl_ioas_unmap(ENOENT, self->base_iova + 16 * PAGE_SIZE,
621				  PAGE_SIZE);
622	test_err_ioctl_ioas_unmap(ENOENT, self->base_iova + 17 * PAGE_SIZE,
623				  PAGE_SIZE);
624
625	/* Over map fails */
626	test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE * 2,
627				      self->base_iova + 16 * PAGE_SIZE);
628	test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE,
629				      self->base_iova + 16 * PAGE_SIZE);
630	test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE,
631				      self->base_iova + 17 * PAGE_SIZE);
632	test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE * 2,
633				      self->base_iova + 15 * PAGE_SIZE);
634	test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE * 3,
635				      self->base_iova + 15 * PAGE_SIZE);
636
637	/* unmap all works */
638	test_ioctl_ioas_unmap(0, UINT64_MAX);
639
640	/* Unmap all succeeds on an empty IOAS */
641	test_ioctl_ioas_unmap(0, UINT64_MAX);
642}
643
644TEST_F(iommufd_ioas, unmap_fully_contained_areas)
645{
646	uint64_t unmap_len;
647	int i;
648
649	/* Give no_domain some space to rewind base_iova */
650	self->base_iova += 4 * PAGE_SIZE;
651
652	for (i = 0; i != 4; i++)
653		test_ioctl_ioas_map_fixed(buffer, 8 * PAGE_SIZE,
654					  self->base_iova + i * 16 * PAGE_SIZE);
655
656	/* Unmap not fully contained area doesn't work */
657	test_err_ioctl_ioas_unmap(ENOENT, self->base_iova - 4 * PAGE_SIZE,
658				  8 * PAGE_SIZE);
659	test_err_ioctl_ioas_unmap(ENOENT,
660				  self->base_iova + 3 * 16 * PAGE_SIZE +
661					  8 * PAGE_SIZE - 4 * PAGE_SIZE,
662				  8 * PAGE_SIZE);
663
664	/* Unmap fully contained areas works */
665	ASSERT_EQ(0, _test_ioctl_ioas_unmap(self->fd, self->ioas_id,
666					    self->base_iova - 4 * PAGE_SIZE,
667					    3 * 16 * PAGE_SIZE + 8 * PAGE_SIZE +
668						    4 * PAGE_SIZE,
669					    &unmap_len));
670	ASSERT_EQ(32 * PAGE_SIZE, unmap_len);
671}
672
673TEST_F(iommufd_ioas, area_auto_iova)
674{
675	struct iommu_test_cmd test_cmd = {
676		.size = sizeof(test_cmd),
677		.op = IOMMU_TEST_OP_ADD_RESERVED,
678		.id = self->ioas_id,
679		.add_reserved = { .start = PAGE_SIZE * 4,
680				  .length = PAGE_SIZE * 100 },
681	};
682	struct iommu_iova_range ranges[1] = {};
683	struct iommu_ioas_allow_iovas allow_cmd = {
684		.size = sizeof(allow_cmd),
685		.ioas_id = self->ioas_id,
686		.num_iovas = 1,
687		.allowed_iovas = (uintptr_t)ranges,
688	};
689	__u64 iovas[10];
690	int i;
691
692	/* Simple 4k pages */
693	for (i = 0; i != 10; i++)
694		test_ioctl_ioas_map(buffer, PAGE_SIZE, &iovas[i]);
695	for (i = 0; i != 10; i++)
696		test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE);
697
698	/* Kernel automatically aligns IOVAs properly */
699	for (i = 0; i != 10; i++) {
700		size_t length = PAGE_SIZE * (i + 1);
701
702		if (self->stdev_id) {
703			test_ioctl_ioas_map(buffer, length, &iovas[i]);
704		} else {
705			test_ioctl_ioas_map((void *)(1UL << 31), length,
706					    &iovas[i]);
707		}
708		EXPECT_EQ(0, iovas[i] % (1UL << (ffs(length) - 1)));
709	}
710	for (i = 0; i != 10; i++)
711		test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1));
712
713	/* Avoids a reserved region */
714	ASSERT_EQ(0,
715		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
716			&test_cmd));
717	for (i = 0; i != 10; i++) {
718		size_t length = PAGE_SIZE * (i + 1);
719
720		test_ioctl_ioas_map(buffer, length, &iovas[i]);
721		EXPECT_EQ(0, iovas[i] % (1UL << (ffs(length) - 1)));
722		EXPECT_EQ(false,
723			  iovas[i] > test_cmd.add_reserved.start &&
724				  iovas[i] <
725					  test_cmd.add_reserved.start +
726						  test_cmd.add_reserved.length);
727	}
728	for (i = 0; i != 10; i++)
729		test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1));
730
731	/* Allowed region intersects with a reserved region */
732	ranges[0].start = PAGE_SIZE;
733	ranges[0].last = PAGE_SIZE * 600;
734	EXPECT_ERRNO(EADDRINUSE,
735		     ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
736
737	/* Allocate from an allowed region */
738	if (self->stdev_id) {
739		ranges[0].start = MOCK_APERTURE_START + PAGE_SIZE;
740		ranges[0].last = MOCK_APERTURE_START + PAGE_SIZE * 600 - 1;
741	} else {
742		ranges[0].start = PAGE_SIZE * 200;
743		ranges[0].last = PAGE_SIZE * 600 - 1;
744	}
745	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
746	for (i = 0; i != 10; i++) {
747		size_t length = PAGE_SIZE * (i + 1);
748
749		test_ioctl_ioas_map(buffer, length, &iovas[i]);
750		EXPECT_EQ(0, iovas[i] % (1UL << (ffs(length) - 1)));
751		EXPECT_EQ(true, iovas[i] >= ranges[0].start);
752		EXPECT_EQ(true, iovas[i] <= ranges[0].last);
753		EXPECT_EQ(true, iovas[i] + length > ranges[0].start);
754		EXPECT_EQ(true, iovas[i] + length <= ranges[0].last + 1);
755	}
756	for (i = 0; i != 10; i++)
757		test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1));
758}
759
760TEST_F(iommufd_ioas, area_allowed)
761{
762	struct iommu_test_cmd test_cmd = {
763		.size = sizeof(test_cmd),
764		.op = IOMMU_TEST_OP_ADD_RESERVED,
765		.id = self->ioas_id,
766		.add_reserved = { .start = PAGE_SIZE * 4,
767				  .length = PAGE_SIZE * 100 },
768	};
769	struct iommu_iova_range ranges[1] = {};
770	struct iommu_ioas_allow_iovas allow_cmd = {
771		.size = sizeof(allow_cmd),
772		.ioas_id = self->ioas_id,
773		.num_iovas = 1,
774		.allowed_iovas = (uintptr_t)ranges,
775	};
776
777	/* Reserved intersects an allowed */
778	allow_cmd.num_iovas = 1;
779	ranges[0].start = self->base_iova;
780	ranges[0].last = ranges[0].start + PAGE_SIZE * 600;
781	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
782	test_cmd.add_reserved.start = ranges[0].start + PAGE_SIZE;
783	test_cmd.add_reserved.length = PAGE_SIZE;
784	EXPECT_ERRNO(EADDRINUSE,
785		     ioctl(self->fd,
786			   _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
787			   &test_cmd));
788	allow_cmd.num_iovas = 0;
789	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
790
791	/* Allowed intersects a reserved */
792	ASSERT_EQ(0,
793		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
794			&test_cmd));
795	allow_cmd.num_iovas = 1;
796	ranges[0].start = self->base_iova;
797	ranges[0].last = ranges[0].start + PAGE_SIZE * 600;
798	EXPECT_ERRNO(EADDRINUSE,
799		     ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
800}
801
802TEST_F(iommufd_ioas, copy_area)
803{
804	struct iommu_ioas_copy copy_cmd = {
805		.size = sizeof(copy_cmd),
806		.flags = IOMMU_IOAS_MAP_FIXED_IOVA,
807		.dst_ioas_id = self->ioas_id,
808		.src_ioas_id = self->ioas_id,
809		.length = PAGE_SIZE,
810	};
811
812	test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, self->base_iova);
813
814	/* Copy inside a single IOAS */
815	copy_cmd.src_iova = self->base_iova;
816	copy_cmd.dst_iova = self->base_iova + PAGE_SIZE;
817	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
818
819	/* Copy between IOAS's */
820	copy_cmd.src_iova = self->base_iova;
821	copy_cmd.dst_iova = 0;
822	test_ioctl_ioas_alloc(&copy_cmd.dst_ioas_id);
823	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
824}
825
826TEST_F(iommufd_ioas, iova_ranges)
827{
828	struct iommu_test_cmd test_cmd = {
829		.size = sizeof(test_cmd),
830		.op = IOMMU_TEST_OP_ADD_RESERVED,
831		.id = self->ioas_id,
832		.add_reserved = { .start = PAGE_SIZE, .length = PAGE_SIZE },
833	};
834	struct iommu_iova_range *ranges = buffer;
835	struct iommu_ioas_iova_ranges ranges_cmd = {
836		.size = sizeof(ranges_cmd),
837		.ioas_id = self->ioas_id,
838		.num_iovas = BUFFER_SIZE / sizeof(*ranges),
839		.allowed_iovas = (uintptr_t)ranges,
840	};
841
842	/* Range can be read */
843	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
844	EXPECT_EQ(1, ranges_cmd.num_iovas);
845	if (!self->stdev_id) {
846		EXPECT_EQ(0, ranges[0].start);
847		EXPECT_EQ(SIZE_MAX, ranges[0].last);
848		EXPECT_EQ(1, ranges_cmd.out_iova_alignment);
849	} else {
850		EXPECT_EQ(MOCK_APERTURE_START, ranges[0].start);
851		EXPECT_EQ(MOCK_APERTURE_LAST, ranges[0].last);
852		EXPECT_EQ(MOCK_PAGE_SIZE, ranges_cmd.out_iova_alignment);
853	}
854
855	/* Buffer too small */
856	memset(ranges, 0, BUFFER_SIZE);
857	ranges_cmd.num_iovas = 0;
858	EXPECT_ERRNO(EMSGSIZE,
859		     ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
860	EXPECT_EQ(1, ranges_cmd.num_iovas);
861	EXPECT_EQ(0, ranges[0].start);
862	EXPECT_EQ(0, ranges[0].last);
863
864	/* 2 ranges */
865	ASSERT_EQ(0,
866		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
867			&test_cmd));
868	ranges_cmd.num_iovas = BUFFER_SIZE / sizeof(*ranges);
869	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
870	if (!self->stdev_id) {
871		EXPECT_EQ(2, ranges_cmd.num_iovas);
872		EXPECT_EQ(0, ranges[0].start);
873		EXPECT_EQ(PAGE_SIZE - 1, ranges[0].last);
874		EXPECT_EQ(PAGE_SIZE * 2, ranges[1].start);
875		EXPECT_EQ(SIZE_MAX, ranges[1].last);
876	} else {
877		EXPECT_EQ(1, ranges_cmd.num_iovas);
878		EXPECT_EQ(MOCK_APERTURE_START, ranges[0].start);
879		EXPECT_EQ(MOCK_APERTURE_LAST, ranges[0].last);
880	}
881
882	/* Buffer too small */
883	memset(ranges, 0, BUFFER_SIZE);
884	ranges_cmd.num_iovas = 1;
885	if (!self->stdev_id) {
886		EXPECT_ERRNO(EMSGSIZE, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES,
887					     &ranges_cmd));
888		EXPECT_EQ(2, ranges_cmd.num_iovas);
889		EXPECT_EQ(0, ranges[0].start);
890		EXPECT_EQ(PAGE_SIZE - 1, ranges[0].last);
891	} else {
892		ASSERT_EQ(0,
893			  ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
894		EXPECT_EQ(1, ranges_cmd.num_iovas);
895		EXPECT_EQ(MOCK_APERTURE_START, ranges[0].start);
896		EXPECT_EQ(MOCK_APERTURE_LAST, ranges[0].last);
897	}
898	EXPECT_EQ(0, ranges[1].start);
899	EXPECT_EQ(0, ranges[1].last);
900}
901
902TEST_F(iommufd_ioas, access_domain_destory)
903{
904	struct iommu_test_cmd access_cmd = {
905		.size = sizeof(access_cmd),
906		.op = IOMMU_TEST_OP_ACCESS_PAGES,
907		.access_pages = { .iova = self->base_iova + PAGE_SIZE,
908				  .length = PAGE_SIZE},
909	};
910	size_t buf_size = 2 * HUGEPAGE_SIZE;
911	uint8_t *buf;
912
913	buf = mmap(0, buf_size, PROT_READ | PROT_WRITE,
914		   MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB | MAP_POPULATE, -1,
915		   0);
916	ASSERT_NE(MAP_FAILED, buf);
917	test_ioctl_ioas_map_fixed(buf, buf_size, self->base_iova);
918
919	test_cmd_create_access(self->ioas_id, &access_cmd.id,
920			       MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
921	access_cmd.access_pages.uptr = (uintptr_t)buf + PAGE_SIZE;
922	ASSERT_EQ(0,
923		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
924			&access_cmd));
925
926	/* Causes a complicated unpin across a huge page boundary */
927	if (self->stdev_id)
928		test_ioctl_destroy(self->stdev_id);
929
930	test_cmd_destroy_access_pages(
931		access_cmd.id, access_cmd.access_pages.out_access_pages_id);
932	test_cmd_destroy_access(access_cmd.id);
933	ASSERT_EQ(0, munmap(buf, buf_size));
934}
935
936TEST_F(iommufd_ioas, access_pin)
937{
938	struct iommu_test_cmd access_cmd = {
939		.size = sizeof(access_cmd),
940		.op = IOMMU_TEST_OP_ACCESS_PAGES,
941		.access_pages = { .iova = MOCK_APERTURE_START,
942				  .length = BUFFER_SIZE,
943				  .uptr = (uintptr_t)buffer },
944	};
945	struct iommu_test_cmd check_map_cmd = {
946		.size = sizeof(check_map_cmd),
947		.op = IOMMU_TEST_OP_MD_CHECK_MAP,
948		.check_map = { .iova = MOCK_APERTURE_START,
949			       .length = BUFFER_SIZE,
950			       .uptr = (uintptr_t)buffer },
951	};
952	uint32_t access_pages_id;
953	unsigned int npages;
954
955	test_cmd_create_access(self->ioas_id, &access_cmd.id,
956			       MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
957
958	for (npages = 1; npages < BUFFER_SIZE / PAGE_SIZE; npages++) {
959		uint32_t mock_stdev_id;
960		uint32_t mock_hwpt_id;
961
962		access_cmd.access_pages.length = npages * PAGE_SIZE;
963
964		/* Single map/unmap */
965		test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE,
966					  MOCK_APERTURE_START);
967		ASSERT_EQ(0, ioctl(self->fd,
968				   _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
969				   &access_cmd));
970		test_cmd_destroy_access_pages(
971			access_cmd.id,
972			access_cmd.access_pages.out_access_pages_id);
973
974		/* Double user */
975		ASSERT_EQ(0, ioctl(self->fd,
976				   _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
977				   &access_cmd));
978		access_pages_id = access_cmd.access_pages.out_access_pages_id;
979		ASSERT_EQ(0, ioctl(self->fd,
980				   _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
981				   &access_cmd));
982		test_cmd_destroy_access_pages(
983			access_cmd.id,
984			access_cmd.access_pages.out_access_pages_id);
985		test_cmd_destroy_access_pages(access_cmd.id, access_pages_id);
986
987		/* Add/remove a domain with a user */
988		ASSERT_EQ(0, ioctl(self->fd,
989				   _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
990				   &access_cmd));
991		test_cmd_mock_domain(self->ioas_id, &mock_stdev_id,
992				     &mock_hwpt_id, NULL);
993		check_map_cmd.id = mock_hwpt_id;
994		ASSERT_EQ(0, ioctl(self->fd,
995				   _IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_MAP),
996				   &check_map_cmd));
997
998		test_ioctl_destroy(mock_stdev_id);
999		test_cmd_destroy_access_pages(
1000			access_cmd.id,
1001			access_cmd.access_pages.out_access_pages_id);
1002
1003		test_ioctl_ioas_unmap(MOCK_APERTURE_START, BUFFER_SIZE);
1004	}
1005	test_cmd_destroy_access(access_cmd.id);
1006}
1007
1008TEST_F(iommufd_ioas, access_pin_unmap)
1009{
1010	struct iommu_test_cmd access_pages_cmd = {
1011		.size = sizeof(access_pages_cmd),
1012		.op = IOMMU_TEST_OP_ACCESS_PAGES,
1013		.access_pages = { .iova = MOCK_APERTURE_START,
1014				  .length = BUFFER_SIZE,
1015				  .uptr = (uintptr_t)buffer },
1016	};
1017
1018	test_cmd_create_access(self->ioas_id, &access_pages_cmd.id,
1019			       MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
1020	test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE, MOCK_APERTURE_START);
1021	ASSERT_EQ(0,
1022		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
1023			&access_pages_cmd));
1024
1025	/* Trigger the unmap op */
1026	test_ioctl_ioas_unmap(MOCK_APERTURE_START, BUFFER_SIZE);
1027
1028	/* kernel removed the item for us */
1029	test_err_destroy_access_pages(
1030		ENOENT, access_pages_cmd.id,
1031		access_pages_cmd.access_pages.out_access_pages_id);
1032}
1033
1034static void check_access_rw(struct __test_metadata *_metadata, int fd,
1035			    unsigned int access_id, uint64_t iova,
1036			    unsigned int def_flags)
1037{
1038	uint16_t tmp[32];
1039	struct iommu_test_cmd access_cmd = {
1040		.size = sizeof(access_cmd),
1041		.op = IOMMU_TEST_OP_ACCESS_RW,
1042		.id = access_id,
1043		.access_rw = { .uptr = (uintptr_t)tmp },
1044	};
1045	uint16_t *buffer16 = buffer;
1046	unsigned int i;
1047	void *tmp2;
1048
1049	for (i = 0; i != BUFFER_SIZE / sizeof(*buffer16); i++)
1050		buffer16[i] = rand();
1051
1052	for (access_cmd.access_rw.iova = iova + PAGE_SIZE - 50;
1053	     access_cmd.access_rw.iova < iova + PAGE_SIZE + 50;
1054	     access_cmd.access_rw.iova++) {
1055		for (access_cmd.access_rw.length = 1;
1056		     access_cmd.access_rw.length < sizeof(tmp);
1057		     access_cmd.access_rw.length++) {
1058			access_cmd.access_rw.flags = def_flags;
1059			ASSERT_EQ(0, ioctl(fd,
1060					   _IOMMU_TEST_CMD(
1061						   IOMMU_TEST_OP_ACCESS_RW),
1062					   &access_cmd));
1063			ASSERT_EQ(0,
1064				  memcmp(buffer + (access_cmd.access_rw.iova -
1065						   iova),
1066					 tmp, access_cmd.access_rw.length));
1067
1068			for (i = 0; i != ARRAY_SIZE(tmp); i++)
1069				tmp[i] = rand();
1070			access_cmd.access_rw.flags = def_flags |
1071						     MOCK_ACCESS_RW_WRITE;
1072			ASSERT_EQ(0, ioctl(fd,
1073					   _IOMMU_TEST_CMD(
1074						   IOMMU_TEST_OP_ACCESS_RW),
1075					   &access_cmd));
1076			ASSERT_EQ(0,
1077				  memcmp(buffer + (access_cmd.access_rw.iova -
1078						   iova),
1079					 tmp, access_cmd.access_rw.length));
1080		}
1081	}
1082
1083	/* Multi-page test */
1084	tmp2 = malloc(BUFFER_SIZE);
1085	ASSERT_NE(NULL, tmp2);
1086	access_cmd.access_rw.iova = iova;
1087	access_cmd.access_rw.length = BUFFER_SIZE;
1088	access_cmd.access_rw.flags = def_flags;
1089	access_cmd.access_rw.uptr = (uintptr_t)tmp2;
1090	ASSERT_EQ(0, ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
1091			   &access_cmd));
1092	ASSERT_EQ(0, memcmp(buffer, tmp2, access_cmd.access_rw.length));
1093	free(tmp2);
1094}
1095
1096TEST_F(iommufd_ioas, access_rw)
1097{
1098	__u32 access_id;
1099	__u64 iova;
1100
1101	test_cmd_create_access(self->ioas_id, &access_id, 0);
1102	test_ioctl_ioas_map(buffer, BUFFER_SIZE, &iova);
1103	check_access_rw(_metadata, self->fd, access_id, iova, 0);
1104	check_access_rw(_metadata, self->fd, access_id, iova,
1105			MOCK_ACCESS_RW_SLOW_PATH);
1106	test_ioctl_ioas_unmap(iova, BUFFER_SIZE);
1107	test_cmd_destroy_access(access_id);
1108}
1109
1110TEST_F(iommufd_ioas, access_rw_unaligned)
1111{
1112	__u32 access_id;
1113	__u64 iova;
1114
1115	test_cmd_create_access(self->ioas_id, &access_id, 0);
1116
1117	/* Unaligned pages */
1118	iova = self->base_iova + MOCK_PAGE_SIZE;
1119	test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE, iova);
1120	check_access_rw(_metadata, self->fd, access_id, iova, 0);
1121	test_ioctl_ioas_unmap(iova, BUFFER_SIZE);
1122	test_cmd_destroy_access(access_id);
1123}
1124
1125TEST_F(iommufd_ioas, fork_gone)
1126{
1127	__u32 access_id;
1128	pid_t child;
1129
1130	test_cmd_create_access(self->ioas_id, &access_id, 0);
1131
1132	/* Create a mapping with a different mm */
1133	child = fork();
1134	if (!child) {
1135		test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE,
1136					  MOCK_APERTURE_START);
1137		exit(0);
1138	}
1139	ASSERT_NE(-1, child);
1140	ASSERT_EQ(child, waitpid(child, NULL, 0));
1141
1142	if (self->stdev_id) {
1143		/*
1144		 * If a domain already existed then everything was pinned within
1145		 * the fork, so this copies from one domain to another.
1146		 */
1147		test_cmd_mock_domain(self->ioas_id, NULL, NULL, NULL);
1148		check_access_rw(_metadata, self->fd, access_id,
1149				MOCK_APERTURE_START, 0);
1150
1151	} else {
1152		/*
1153		 * Otherwise we need to actually pin pages which can't happen
1154		 * since the fork is gone.
1155		 */
1156		test_err_mock_domain(EFAULT, self->ioas_id, NULL, NULL);
1157	}
1158
1159	test_cmd_destroy_access(access_id);
1160}
1161
1162TEST_F(iommufd_ioas, fork_present)
1163{
1164	__u32 access_id;
1165	int pipefds[2];
1166	uint64_t tmp;
1167	pid_t child;
1168	int efd;
1169
1170	test_cmd_create_access(self->ioas_id, &access_id, 0);
1171
1172	ASSERT_EQ(0, pipe2(pipefds, O_CLOEXEC));
1173	efd = eventfd(0, EFD_CLOEXEC);
1174	ASSERT_NE(-1, efd);
1175
1176	/* Create a mapping with a different mm */
1177	child = fork();
1178	if (!child) {
1179		__u64 iova;
1180		uint64_t one = 1;
1181
1182		close(pipefds[1]);
1183		test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE,
1184					  MOCK_APERTURE_START);
1185		if (write(efd, &one, sizeof(one)) != sizeof(one))
1186			exit(100);
1187		if (read(pipefds[0], &iova, 1) != 1)
1188			exit(100);
1189		exit(0);
1190	}
1191	close(pipefds[0]);
1192	ASSERT_NE(-1, child);
1193	ASSERT_EQ(8, read(efd, &tmp, sizeof(tmp)));
1194
1195	/* Read pages from the remote process */
1196	test_cmd_mock_domain(self->ioas_id, NULL, NULL, NULL);
1197	check_access_rw(_metadata, self->fd, access_id, MOCK_APERTURE_START, 0);
1198
1199	ASSERT_EQ(0, close(pipefds[1]));
1200	ASSERT_EQ(child, waitpid(child, NULL, 0));
1201
1202	test_cmd_destroy_access(access_id);
1203}
1204
1205TEST_F(iommufd_ioas, ioas_option_huge_pages)
1206{
1207	struct iommu_option cmd = {
1208		.size = sizeof(cmd),
1209		.option_id = IOMMU_OPTION_HUGE_PAGES,
1210		.op = IOMMU_OPTION_OP_GET,
1211		.val64 = 3,
1212		.object_id = self->ioas_id,
1213	};
1214
1215	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1216	ASSERT_EQ(1, cmd.val64);
1217
1218	cmd.op = IOMMU_OPTION_OP_SET;
1219	cmd.val64 = 0;
1220	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1221
1222	cmd.op = IOMMU_OPTION_OP_GET;
1223	cmd.val64 = 3;
1224	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1225	ASSERT_EQ(0, cmd.val64);
1226
1227	cmd.op = IOMMU_OPTION_OP_SET;
1228	cmd.val64 = 2;
1229	EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_OPTION, &cmd));
1230
1231	cmd.op = IOMMU_OPTION_OP_SET;
1232	cmd.val64 = 1;
1233	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1234}
1235
1236TEST_F(iommufd_ioas, ioas_iova_alloc)
1237{
1238	unsigned int length;
1239	__u64 iova;
1240
1241	for (length = 1; length != PAGE_SIZE * 2; length++) {
1242		if (variant->mock_domains && (length % MOCK_PAGE_SIZE)) {
1243			test_err_ioctl_ioas_map(EINVAL, buffer, length, &iova);
1244		} else {
1245			test_ioctl_ioas_map(buffer, length, &iova);
1246			test_ioctl_ioas_unmap(iova, length);
1247		}
1248	}
1249}
1250
1251TEST_F(iommufd_ioas, ioas_align_change)
1252{
1253	struct iommu_option cmd = {
1254		.size = sizeof(cmd),
1255		.option_id = IOMMU_OPTION_HUGE_PAGES,
1256		.op = IOMMU_OPTION_OP_SET,
1257		.object_id = self->ioas_id,
1258		/* 0 means everything must be aligned to PAGE_SIZE */
1259		.val64 = 0,
1260	};
1261
1262	/*
1263	 * We cannot upgrade the alignment using OPTION_HUGE_PAGES when a domain
1264	 * and map are present.
1265	 */
1266	if (variant->mock_domains)
1267		return;
1268
1269	/*
1270	 * We can upgrade to PAGE_SIZE alignment when things are aligned right
1271	 */
1272	test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, MOCK_APERTURE_START);
1273	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1274
1275	/* Misalignment is rejected at map time */
1276	test_err_ioctl_ioas_map_fixed(EINVAL, buffer + MOCK_PAGE_SIZE,
1277				      PAGE_SIZE,
1278				      MOCK_APERTURE_START + PAGE_SIZE);
1279	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1280
1281	/* Reduce alignment */
1282	cmd.val64 = 1;
1283	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1284
1285	/* Confirm misalignment is rejected during alignment upgrade */
1286	test_ioctl_ioas_map_fixed(buffer + MOCK_PAGE_SIZE, PAGE_SIZE,
1287				  MOCK_APERTURE_START + PAGE_SIZE);
1288	cmd.val64 = 0;
1289	EXPECT_ERRNO(EADDRINUSE, ioctl(self->fd, IOMMU_OPTION, &cmd));
1290
1291	test_ioctl_ioas_unmap(MOCK_APERTURE_START + PAGE_SIZE, PAGE_SIZE);
1292	test_ioctl_ioas_unmap(MOCK_APERTURE_START, PAGE_SIZE);
1293}
1294
1295TEST_F(iommufd_ioas, copy_sweep)
1296{
1297	struct iommu_ioas_copy copy_cmd = {
1298		.size = sizeof(copy_cmd),
1299		.flags = IOMMU_IOAS_MAP_FIXED_IOVA,
1300		.src_ioas_id = self->ioas_id,
1301		.dst_iova = MOCK_APERTURE_START,
1302		.length = MOCK_PAGE_SIZE,
1303	};
1304	unsigned int dst_ioas_id;
1305	uint64_t last_iova;
1306	uint64_t iova;
1307
1308	test_ioctl_ioas_alloc(&dst_ioas_id);
1309	copy_cmd.dst_ioas_id = dst_ioas_id;
1310
1311	if (variant->mock_domains)
1312		last_iova = MOCK_APERTURE_START + BUFFER_SIZE - 1;
1313	else
1314		last_iova = MOCK_APERTURE_START + BUFFER_SIZE - 2;
1315
1316	test_ioctl_ioas_map_fixed(buffer, last_iova - MOCK_APERTURE_START + 1,
1317				  MOCK_APERTURE_START);
1318
1319	for (iova = MOCK_APERTURE_START - PAGE_SIZE; iova <= last_iova;
1320	     iova += 511) {
1321		copy_cmd.src_iova = iova;
1322		if (iova < MOCK_APERTURE_START ||
1323		    iova + copy_cmd.length - 1 > last_iova) {
1324			EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_IOAS_COPY,
1325						   &copy_cmd));
1326		} else {
1327			ASSERT_EQ(0,
1328				  ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
1329			test_ioctl_ioas_unmap_id(dst_ioas_id, copy_cmd.dst_iova,
1330						 copy_cmd.length);
1331		}
1332	}
1333
1334	test_ioctl_destroy(dst_ioas_id);
1335}
1336
1337FIXTURE(iommufd_mock_domain)
1338{
1339	int fd;
1340	uint32_t ioas_id;
1341	uint32_t hwpt_id;
1342	uint32_t hwpt_ids[2];
1343	uint32_t stdev_ids[2];
1344	uint32_t idev_ids[2];
1345	int mmap_flags;
1346	size_t mmap_buf_size;
1347};
1348
1349FIXTURE_VARIANT(iommufd_mock_domain)
1350{
1351	unsigned int mock_domains;
1352	bool hugepages;
1353};
1354
1355FIXTURE_SETUP(iommufd_mock_domain)
1356{
1357	unsigned int i;
1358
1359	self->fd = open("/dev/iommu", O_RDWR);
1360	ASSERT_NE(-1, self->fd);
1361	test_ioctl_ioas_alloc(&self->ioas_id);
1362
1363	ASSERT_GE(ARRAY_SIZE(self->hwpt_ids), variant->mock_domains);
1364
1365	for (i = 0; i != variant->mock_domains; i++)
1366		test_cmd_mock_domain(self->ioas_id, &self->stdev_ids[i],
1367				     &self->hwpt_ids[i], &self->idev_ids[i]);
1368	self->hwpt_id = self->hwpt_ids[0];
1369
1370	self->mmap_flags = MAP_SHARED | MAP_ANONYMOUS;
1371	self->mmap_buf_size = PAGE_SIZE * 8;
1372	if (variant->hugepages) {
1373		/*
1374		 * MAP_POPULATE will cause the kernel to fail mmap if THPs are
1375		 * not available.
1376		 */
1377		self->mmap_flags |= MAP_HUGETLB | MAP_POPULATE;
1378		self->mmap_buf_size = HUGEPAGE_SIZE * 2;
1379	}
1380}
1381
1382FIXTURE_TEARDOWN(iommufd_mock_domain)
1383{
1384	teardown_iommufd(self->fd, _metadata);
1385}
1386
1387FIXTURE_VARIANT_ADD(iommufd_mock_domain, one_domain)
1388{
1389	.mock_domains = 1,
1390	.hugepages = false,
1391};
1392
1393FIXTURE_VARIANT_ADD(iommufd_mock_domain, two_domains)
1394{
1395	.mock_domains = 2,
1396	.hugepages = false,
1397};
1398
1399FIXTURE_VARIANT_ADD(iommufd_mock_domain, one_domain_hugepage)
1400{
1401	.mock_domains = 1,
1402	.hugepages = true,
1403};
1404
1405FIXTURE_VARIANT_ADD(iommufd_mock_domain, two_domains_hugepage)
1406{
1407	.mock_domains = 2,
1408	.hugepages = true,
1409};
1410
1411/* Have the kernel check that the user pages made it to the iommu_domain */
1412#define check_mock_iova(_ptr, _iova, _length)                                \
1413	({                                                                   \
1414		struct iommu_test_cmd check_map_cmd = {                      \
1415			.size = sizeof(check_map_cmd),                       \
1416			.op = IOMMU_TEST_OP_MD_CHECK_MAP,                    \
1417			.id = self->hwpt_id,                                 \
1418			.check_map = { .iova = _iova,                        \
1419				       .length = _length,                    \
1420				       .uptr = (uintptr_t)(_ptr) },          \
1421		};                                                           \
1422		ASSERT_EQ(0,                                                 \
1423			  ioctl(self->fd,                                    \
1424				_IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_MAP), \
1425				&check_map_cmd));                            \
1426		if (self->hwpt_ids[1]) {                                     \
1427			check_map_cmd.id = self->hwpt_ids[1];                \
1428			ASSERT_EQ(0,                                         \
1429				  ioctl(self->fd,                            \
1430					_IOMMU_TEST_CMD(                     \
1431						IOMMU_TEST_OP_MD_CHECK_MAP), \
1432					&check_map_cmd));                    \
1433		}                                                            \
1434	})
1435
1436TEST_F(iommufd_mock_domain, basic)
1437{
1438	size_t buf_size = self->mmap_buf_size;
1439	uint8_t *buf;
1440	__u64 iova;
1441
1442	/* Simple one page map */
1443	test_ioctl_ioas_map(buffer, PAGE_SIZE, &iova);
1444	check_mock_iova(buffer, iova, PAGE_SIZE);
1445
1446	buf = mmap(0, buf_size, PROT_READ | PROT_WRITE, self->mmap_flags, -1,
1447		   0);
1448	ASSERT_NE(MAP_FAILED, buf);
1449
1450	/* EFAULT half way through mapping */
1451	ASSERT_EQ(0, munmap(buf + buf_size / 2, buf_size / 2));
1452	test_err_ioctl_ioas_map(EFAULT, buf, buf_size, &iova);
1453
1454	/* EFAULT on first page */
1455	ASSERT_EQ(0, munmap(buf, buf_size / 2));
1456	test_err_ioctl_ioas_map(EFAULT, buf, buf_size, &iova);
1457}
1458
1459TEST_F(iommufd_mock_domain, ro_unshare)
1460{
1461	uint8_t *buf;
1462	__u64 iova;
1463	int fd;
1464
1465	fd = open("/proc/self/exe", O_RDONLY);
1466	ASSERT_NE(-1, fd);
1467
1468	buf = mmap(0, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
1469	ASSERT_NE(MAP_FAILED, buf);
1470	close(fd);
1471
1472	/*
1473	 * There have been lots of changes to the "unshare" mechanism in
1474	 * get_user_pages(), make sure it works right. The write to the page
1475	 * after we map it for reading should not change the assigned PFN.
1476	 */
1477	ASSERT_EQ(0,
1478		  _test_ioctl_ioas_map(self->fd, self->ioas_id, buf, PAGE_SIZE,
1479				       &iova, IOMMU_IOAS_MAP_READABLE));
1480	check_mock_iova(buf, iova, PAGE_SIZE);
1481	memset(buf, 1, PAGE_SIZE);
1482	check_mock_iova(buf, iova, PAGE_SIZE);
1483	ASSERT_EQ(0, munmap(buf, PAGE_SIZE));
1484}
1485
1486TEST_F(iommufd_mock_domain, all_aligns)
1487{
1488	size_t test_step = variant->hugepages ? (self->mmap_buf_size / 16) :
1489						MOCK_PAGE_SIZE;
1490	size_t buf_size = self->mmap_buf_size;
1491	unsigned int start;
1492	unsigned int end;
1493	uint8_t *buf;
1494
1495	buf = mmap(0, buf_size, PROT_READ | PROT_WRITE, self->mmap_flags, -1,
1496		   0);
1497	ASSERT_NE(MAP_FAILED, buf);
1498	check_refs(buf, buf_size, 0);
1499
1500	/*
1501	 * Map every combination of page size and alignment within a big region,
1502	 * less for hugepage case as it takes so long to finish.
1503	 */
1504	for (start = 0; start < buf_size; start += test_step) {
1505		if (variant->hugepages)
1506			end = buf_size;
1507		else
1508			end = start + MOCK_PAGE_SIZE;
1509		for (; end < buf_size; end += MOCK_PAGE_SIZE) {
1510			size_t length = end - start;
1511			__u64 iova;
1512
1513			test_ioctl_ioas_map(buf + start, length, &iova);
1514			check_mock_iova(buf + start, iova, length);
1515			check_refs(buf + start / PAGE_SIZE * PAGE_SIZE,
1516				   end / PAGE_SIZE * PAGE_SIZE -
1517					   start / PAGE_SIZE * PAGE_SIZE,
1518				   1);
1519
1520			test_ioctl_ioas_unmap(iova, length);
1521		}
1522	}
1523	check_refs(buf, buf_size, 0);
1524	ASSERT_EQ(0, munmap(buf, buf_size));
1525}
1526
1527TEST_F(iommufd_mock_domain, all_aligns_copy)
1528{
1529	size_t test_step = variant->hugepages ? self->mmap_buf_size / 16 :
1530						MOCK_PAGE_SIZE;
1531	size_t buf_size = self->mmap_buf_size;
1532	unsigned int start;
1533	unsigned int end;
1534	uint8_t *buf;
1535
1536	buf = mmap(0, buf_size, PROT_READ | PROT_WRITE, self->mmap_flags, -1,
1537		   0);
1538	ASSERT_NE(MAP_FAILED, buf);
1539	check_refs(buf, buf_size, 0);
1540
1541	/*
1542	 * Map every combination of page size and alignment within a big region,
1543	 * less for hugepage case as it takes so long to finish.
1544	 */
1545	for (start = 0; start < buf_size; start += test_step) {
1546		if (variant->hugepages)
1547			end = buf_size;
1548		else
1549			end = start + MOCK_PAGE_SIZE;
1550		for (; end < buf_size; end += MOCK_PAGE_SIZE) {
1551			size_t length = end - start;
1552			unsigned int old_id;
1553			uint32_t mock_stdev_id;
1554			__u64 iova;
1555
1556			test_ioctl_ioas_map(buf + start, length, &iova);
1557
1558			/* Add and destroy a domain while the area exists */
1559			old_id = self->hwpt_ids[1];
1560			test_cmd_mock_domain(self->ioas_id, &mock_stdev_id,
1561					     &self->hwpt_ids[1], NULL);
1562
1563			check_mock_iova(buf + start, iova, length);
1564			check_refs(buf + start / PAGE_SIZE * PAGE_SIZE,
1565				   end / PAGE_SIZE * PAGE_SIZE -
1566					   start / PAGE_SIZE * PAGE_SIZE,
1567				   1);
1568
1569			test_ioctl_destroy(mock_stdev_id);
1570			self->hwpt_ids[1] = old_id;
1571
1572			test_ioctl_ioas_unmap(iova, length);
1573		}
1574	}
1575	check_refs(buf, buf_size, 0);
1576	ASSERT_EQ(0, munmap(buf, buf_size));
1577}
1578
1579TEST_F(iommufd_mock_domain, user_copy)
1580{
1581	struct iommu_test_cmd access_cmd = {
1582		.size = sizeof(access_cmd),
1583		.op = IOMMU_TEST_OP_ACCESS_PAGES,
1584		.access_pages = { .length = BUFFER_SIZE,
1585				  .uptr = (uintptr_t)buffer },
1586	};
1587	struct iommu_ioas_copy copy_cmd = {
1588		.size = sizeof(copy_cmd),
1589		.flags = IOMMU_IOAS_MAP_FIXED_IOVA,
1590		.dst_ioas_id = self->ioas_id,
1591		.dst_iova = MOCK_APERTURE_START,
1592		.length = BUFFER_SIZE,
1593	};
1594	struct iommu_ioas_unmap unmap_cmd = {
1595		.size = sizeof(unmap_cmd),
1596		.ioas_id = self->ioas_id,
1597		.iova = MOCK_APERTURE_START,
1598		.length = BUFFER_SIZE,
1599	};
1600	unsigned int new_ioas_id, ioas_id;
1601
1602	/* Pin the pages in an IOAS with no domains then copy to an IOAS with domains */
1603	test_ioctl_ioas_alloc(&ioas_id);
1604	test_ioctl_ioas_map_id(ioas_id, buffer, BUFFER_SIZE,
1605			       &copy_cmd.src_iova);
1606
1607	test_cmd_create_access(ioas_id, &access_cmd.id,
1608			       MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
1609
1610	access_cmd.access_pages.iova = copy_cmd.src_iova;
1611	ASSERT_EQ(0,
1612		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
1613			&access_cmd));
1614	copy_cmd.src_ioas_id = ioas_id;
1615	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
1616	check_mock_iova(buffer, MOCK_APERTURE_START, BUFFER_SIZE);
1617
1618	/* Now replace the ioas with a new one */
1619	test_ioctl_ioas_alloc(&new_ioas_id);
1620	test_ioctl_ioas_map_id(new_ioas_id, buffer, BUFFER_SIZE,
1621			       &copy_cmd.src_iova);
1622	test_cmd_access_replace_ioas(access_cmd.id, new_ioas_id);
1623
1624	/* Destroy the old ioas and cleanup copied mapping */
1625	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_UNMAP, &unmap_cmd));
1626	test_ioctl_destroy(ioas_id);
1627
1628	/* Then run the same test again with the new ioas */
1629	access_cmd.access_pages.iova = copy_cmd.src_iova;
1630	ASSERT_EQ(0,
1631		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
1632			&access_cmd));
1633	copy_cmd.src_ioas_id = new_ioas_id;
1634	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
1635	check_mock_iova(buffer, MOCK_APERTURE_START, BUFFER_SIZE);
1636
1637	test_cmd_destroy_access_pages(
1638		access_cmd.id, access_cmd.access_pages.out_access_pages_id);
1639	test_cmd_destroy_access(access_cmd.id);
1640
1641	test_ioctl_destroy(new_ioas_id);
1642}
1643
1644TEST_F(iommufd_mock_domain, replace)
1645{
1646	uint32_t ioas_id;
1647
1648	test_ioctl_ioas_alloc(&ioas_id);
1649
1650	test_cmd_mock_domain_replace(self->stdev_ids[0], ioas_id);
1651
1652	/*
1653	 * Replacing the IOAS causes the prior HWPT to be deallocated, thus we
1654	 * should get enoent when we try to use it.
1655	 */
1656	if (variant->mock_domains == 1)
1657		test_err_mock_domain_replace(ENOENT, self->stdev_ids[0],
1658					     self->hwpt_ids[0]);
1659
1660	test_cmd_mock_domain_replace(self->stdev_ids[0], ioas_id);
1661	if (variant->mock_domains >= 2) {
1662		test_cmd_mock_domain_replace(self->stdev_ids[0],
1663					     self->hwpt_ids[1]);
1664		test_cmd_mock_domain_replace(self->stdev_ids[0],
1665					     self->hwpt_ids[1]);
1666		test_cmd_mock_domain_replace(self->stdev_ids[0],
1667					     self->hwpt_ids[0]);
1668	}
1669
1670	test_cmd_mock_domain_replace(self->stdev_ids[0], self->ioas_id);
1671	test_ioctl_destroy(ioas_id);
1672}
1673
1674TEST_F(iommufd_mock_domain, alloc_hwpt)
1675{
1676	int i;
1677
1678	for (i = 0; i != variant->mock_domains; i++) {
1679		uint32_t hwpt_id[2];
1680		uint32_t stddev_id;
1681
1682		test_err_hwpt_alloc(EOPNOTSUPP,
1683				    self->idev_ids[i], self->ioas_id,
1684				    ~IOMMU_HWPT_ALLOC_NEST_PARENT, &hwpt_id[0]);
1685		test_cmd_hwpt_alloc(self->idev_ids[i], self->ioas_id,
1686				    0, &hwpt_id[0]);
1687		test_cmd_hwpt_alloc(self->idev_ids[i], self->ioas_id,
1688				    IOMMU_HWPT_ALLOC_NEST_PARENT, &hwpt_id[1]);
1689
1690		/* Do a hw_pagetable rotation test */
1691		test_cmd_mock_domain_replace(self->stdev_ids[i], hwpt_id[0]);
1692		EXPECT_ERRNO(EBUSY, _test_ioctl_destroy(self->fd, hwpt_id[0]));
1693		test_cmd_mock_domain_replace(self->stdev_ids[i], hwpt_id[1]);
1694		EXPECT_ERRNO(EBUSY, _test_ioctl_destroy(self->fd, hwpt_id[1]));
1695		test_cmd_mock_domain_replace(self->stdev_ids[i], self->ioas_id);
1696		test_ioctl_destroy(hwpt_id[1]);
1697
1698		test_cmd_mock_domain(hwpt_id[0], &stddev_id, NULL, NULL);
1699		test_ioctl_destroy(stddev_id);
1700		test_ioctl_destroy(hwpt_id[0]);
1701	}
1702}
1703
1704FIXTURE(iommufd_dirty_tracking)
1705{
1706	int fd;
1707	uint32_t ioas_id;
1708	uint32_t hwpt_id;
1709	uint32_t stdev_id;
1710	uint32_t idev_id;
1711	unsigned long page_size;
1712	unsigned long bitmap_size;
1713	void *bitmap;
1714	void *buffer;
1715};
1716
1717FIXTURE_VARIANT(iommufd_dirty_tracking)
1718{
1719	unsigned long buffer_size;
1720	bool hugepages;
1721};
1722
1723FIXTURE_SETUP(iommufd_dirty_tracking)
1724{
1725	int mmap_flags;
1726	void *vrc;
1727	int rc;
1728
1729	self->fd = open("/dev/iommu", O_RDWR);
1730	ASSERT_NE(-1, self->fd);
1731
1732	rc = posix_memalign(&self->buffer, HUGEPAGE_SIZE, variant->buffer_size);
1733	if (rc || !self->buffer) {
1734		SKIP(return, "Skipping buffer_size=%lu due to errno=%d",
1735			   variant->buffer_size, rc);
1736	}
1737
1738	mmap_flags = MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED;
1739	if (variant->hugepages) {
1740		/*
1741		 * MAP_POPULATE will cause the kernel to fail mmap if THPs are
1742		 * not available.
1743		 */
1744		mmap_flags |= MAP_HUGETLB | MAP_POPULATE;
1745	}
1746	assert((uintptr_t)self->buffer % HUGEPAGE_SIZE == 0);
1747	vrc = mmap(self->buffer, variant->buffer_size, PROT_READ | PROT_WRITE,
1748		   mmap_flags, -1, 0);
1749	assert(vrc == self->buffer);
1750
1751	self->page_size = MOCK_PAGE_SIZE;
1752	self->bitmap_size =
1753		variant->buffer_size / self->page_size / BITS_PER_BYTE;
1754
1755	/* Provision with an extra (PAGE_SIZE) for the unaligned case */
1756	rc = posix_memalign(&self->bitmap, PAGE_SIZE,
1757			    self->bitmap_size + PAGE_SIZE);
1758	assert(!rc);
1759	assert(self->bitmap);
1760	assert((uintptr_t)self->bitmap % PAGE_SIZE == 0);
1761
1762	test_ioctl_ioas_alloc(&self->ioas_id);
1763	/* Enable 1M mock IOMMU hugepages */
1764	if (variant->hugepages) {
1765		test_cmd_mock_domain_flags(self->ioas_id,
1766					   MOCK_FLAGS_DEVICE_HUGE_IOVA,
1767					   &self->stdev_id, &self->hwpt_id,
1768					   &self->idev_id);
1769	} else {
1770		test_cmd_mock_domain(self->ioas_id, &self->stdev_id,
1771				     &self->hwpt_id, &self->idev_id);
1772	}
1773}
1774
1775FIXTURE_TEARDOWN(iommufd_dirty_tracking)
1776{
1777	munmap(self->buffer, variant->buffer_size);
1778	munmap(self->bitmap, self->bitmap_size);
1779	teardown_iommufd(self->fd, _metadata);
1780}
1781
1782FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128k)
1783{
1784	/* one u32 index bitmap */
1785	.buffer_size = 128UL * 1024UL,
1786};
1787
1788FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty256k)
1789{
1790	/* one u64 index bitmap */
1791	.buffer_size = 256UL * 1024UL,
1792};
1793
1794FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty640k)
1795{
1796	/* two u64 index and trailing end bitmap */
1797	.buffer_size = 640UL * 1024UL,
1798};
1799
1800FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128M)
1801{
1802	/* 4K bitmap (128M IOVA range) */
1803	.buffer_size = 128UL * 1024UL * 1024UL,
1804};
1805
1806FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128M_huge)
1807{
1808	/* 4K bitmap (128M IOVA range) */
1809	.buffer_size = 128UL * 1024UL * 1024UL,
1810	.hugepages = true,
1811};
1812
1813FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty256M)
1814{
1815	/* 8K bitmap (256M IOVA range) */
1816	.buffer_size = 256UL * 1024UL * 1024UL,
1817};
1818
1819FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty256M_huge)
1820{
1821	/* 8K bitmap (256M IOVA range) */
1822	.buffer_size = 256UL * 1024UL * 1024UL,
1823	.hugepages = true,
1824};
1825
1826TEST_F(iommufd_dirty_tracking, enforce_dirty)
1827{
1828	uint32_t ioas_id, stddev_id, idev_id;
1829	uint32_t hwpt_id, _hwpt_id;
1830	uint32_t dev_flags;
1831
1832	/* Regular case */
1833	dev_flags = MOCK_FLAGS_DEVICE_NO_DIRTY;
1834	test_cmd_hwpt_alloc(self->idev_id, self->ioas_id,
1835			    IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
1836	test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL);
1837	test_err_mock_domain_flags(EINVAL, hwpt_id, dev_flags, &stddev_id,
1838				   NULL);
1839	test_ioctl_destroy(stddev_id);
1840	test_ioctl_destroy(hwpt_id);
1841
1842	/* IOMMU device does not support dirty tracking */
1843	test_ioctl_ioas_alloc(&ioas_id);
1844	test_cmd_mock_domain_flags(ioas_id, dev_flags, &stddev_id, &_hwpt_id,
1845				   &idev_id);
1846	test_err_hwpt_alloc(EOPNOTSUPP, idev_id, ioas_id,
1847			    IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
1848	test_ioctl_destroy(stddev_id);
1849}
1850
1851TEST_F(iommufd_dirty_tracking, set_dirty_tracking)
1852{
1853	uint32_t stddev_id;
1854	uint32_t hwpt_id;
1855
1856	test_cmd_hwpt_alloc(self->idev_id, self->ioas_id,
1857			    IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
1858	test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL);
1859	test_cmd_set_dirty_tracking(hwpt_id, true);
1860	test_cmd_set_dirty_tracking(hwpt_id, false);
1861
1862	test_ioctl_destroy(stddev_id);
1863	test_ioctl_destroy(hwpt_id);
1864}
1865
1866TEST_F(iommufd_dirty_tracking, device_dirty_capability)
1867{
1868	uint32_t caps = 0;
1869	uint32_t stddev_id;
1870	uint32_t hwpt_id;
1871
1872	test_cmd_hwpt_alloc(self->idev_id, self->ioas_id, 0, &hwpt_id);
1873	test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL);
1874	test_cmd_get_hw_capabilities(self->idev_id, caps,
1875				     IOMMU_HW_CAP_DIRTY_TRACKING);
1876	ASSERT_EQ(IOMMU_HW_CAP_DIRTY_TRACKING,
1877		  caps & IOMMU_HW_CAP_DIRTY_TRACKING);
1878
1879	test_ioctl_destroy(stddev_id);
1880	test_ioctl_destroy(hwpt_id);
1881}
1882
1883TEST_F(iommufd_dirty_tracking, get_dirty_bitmap)
1884{
1885	uint32_t page_size = MOCK_PAGE_SIZE;
1886	uint32_t hwpt_id;
1887	uint32_t ioas_id;
1888
1889	if (variant->hugepages)
1890		page_size = MOCK_HUGE_PAGE_SIZE;
1891
1892	test_ioctl_ioas_alloc(&ioas_id);
1893	test_ioctl_ioas_map_fixed_id(ioas_id, self->buffer,
1894				     variant->buffer_size, MOCK_APERTURE_START);
1895
1896	test_cmd_hwpt_alloc(self->idev_id, ioas_id,
1897			    IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
1898
1899	test_cmd_set_dirty_tracking(hwpt_id, true);
1900
1901	test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
1902				MOCK_APERTURE_START, self->page_size, page_size,
1903				self->bitmap, self->bitmap_size, 0, _metadata);
1904
1905	/* PAGE_SIZE unaligned bitmap */
1906	test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
1907				MOCK_APERTURE_START, self->page_size, page_size,
1908				self->bitmap + MOCK_PAGE_SIZE,
1909				self->bitmap_size, 0, _metadata);
1910
1911	/* u64 unaligned bitmap */
1912	test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
1913				MOCK_APERTURE_START, self->page_size, page_size,
1914				self->bitmap + 0xff1, self->bitmap_size, 0,
1915				_metadata);
1916
1917	test_ioctl_destroy(hwpt_id);
1918}
1919
1920TEST_F(iommufd_dirty_tracking, get_dirty_bitmap_no_clear)
1921{
1922	uint32_t page_size = MOCK_PAGE_SIZE;
1923	uint32_t hwpt_id;
1924	uint32_t ioas_id;
1925
1926	if (variant->hugepages)
1927		page_size = MOCK_HUGE_PAGE_SIZE;
1928
1929	test_ioctl_ioas_alloc(&ioas_id);
1930	test_ioctl_ioas_map_fixed_id(ioas_id, self->buffer,
1931				     variant->buffer_size, MOCK_APERTURE_START);
1932
1933	test_cmd_hwpt_alloc(self->idev_id, ioas_id,
1934			    IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
1935
1936	test_cmd_set_dirty_tracking(hwpt_id, true);
1937
1938	test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
1939				MOCK_APERTURE_START, self->page_size, page_size,
1940				self->bitmap, self->bitmap_size,
1941				IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR,
1942				_metadata);
1943
1944	/* Unaligned bitmap */
1945	test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
1946				MOCK_APERTURE_START, self->page_size, page_size,
1947				self->bitmap + MOCK_PAGE_SIZE,
1948				self->bitmap_size,
1949				IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR,
1950				_metadata);
1951
1952	/* u64 unaligned bitmap */
1953	test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
1954				MOCK_APERTURE_START, self->page_size, page_size,
1955				self->bitmap + 0xff1, self->bitmap_size,
1956				IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR,
1957				_metadata);
1958
1959	test_ioctl_destroy(hwpt_id);
1960}
1961
1962/* VFIO compatibility IOCTLs */
1963
1964TEST_F(iommufd, simple_ioctls)
1965{
1966	ASSERT_EQ(VFIO_API_VERSION, ioctl(self->fd, VFIO_GET_API_VERSION));
1967	ASSERT_EQ(1, ioctl(self->fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU));
1968}
1969
1970TEST_F(iommufd, unmap_cmd)
1971{
1972	struct vfio_iommu_type1_dma_unmap unmap_cmd = {
1973		.iova = MOCK_APERTURE_START,
1974		.size = PAGE_SIZE,
1975	};
1976
1977	unmap_cmd.argsz = 1;
1978	EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
1979
1980	unmap_cmd.argsz = sizeof(unmap_cmd);
1981	unmap_cmd.flags = 1 << 31;
1982	EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
1983
1984	unmap_cmd.flags = 0;
1985	EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
1986}
1987
1988TEST_F(iommufd, map_cmd)
1989{
1990	struct vfio_iommu_type1_dma_map map_cmd = {
1991		.iova = MOCK_APERTURE_START,
1992		.size = PAGE_SIZE,
1993		.vaddr = (__u64)buffer,
1994	};
1995
1996	map_cmd.argsz = 1;
1997	EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
1998
1999	map_cmd.argsz = sizeof(map_cmd);
2000	map_cmd.flags = 1 << 31;
2001	EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
2002
2003	/* Requires a domain to be attached */
2004	map_cmd.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
2005	EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
2006}
2007
2008TEST_F(iommufd, info_cmd)
2009{
2010	struct vfio_iommu_type1_info info_cmd = {};
2011
2012	/* Invalid argsz */
2013	info_cmd.argsz = 1;
2014	EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_GET_INFO, &info_cmd));
2015
2016	info_cmd.argsz = sizeof(info_cmd);
2017	EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_IOMMU_GET_INFO, &info_cmd));
2018}
2019
2020TEST_F(iommufd, set_iommu_cmd)
2021{
2022	/* Requires a domain to be attached */
2023	EXPECT_ERRNO(ENODEV,
2024		     ioctl(self->fd, VFIO_SET_IOMMU, VFIO_TYPE1v2_IOMMU));
2025	EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU));
2026}
2027
2028TEST_F(iommufd, vfio_ioas)
2029{
2030	struct iommu_vfio_ioas vfio_ioas_cmd = {
2031		.size = sizeof(vfio_ioas_cmd),
2032		.op = IOMMU_VFIO_IOAS_GET,
2033	};
2034	__u32 ioas_id;
2035
2036	/* ENODEV if there is no compat ioas */
2037	EXPECT_ERRNO(ENODEV, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2038
2039	/* Invalid id for set */
2040	vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_SET;
2041	EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2042
2043	/* Valid id for set*/
2044	test_ioctl_ioas_alloc(&ioas_id);
2045	vfio_ioas_cmd.ioas_id = ioas_id;
2046	ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2047
2048	/* Same id comes back from get */
2049	vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_GET;
2050	ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2051	ASSERT_EQ(ioas_id, vfio_ioas_cmd.ioas_id);
2052
2053	/* Clear works */
2054	vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_CLEAR;
2055	ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2056	vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_GET;
2057	EXPECT_ERRNO(ENODEV, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2058}
2059
2060FIXTURE(vfio_compat_mock_domain)
2061{
2062	int fd;
2063	uint32_t ioas_id;
2064};
2065
2066FIXTURE_VARIANT(vfio_compat_mock_domain)
2067{
2068	unsigned int version;
2069};
2070
2071FIXTURE_SETUP(vfio_compat_mock_domain)
2072{
2073	struct iommu_vfio_ioas vfio_ioas_cmd = {
2074		.size = sizeof(vfio_ioas_cmd),
2075		.op = IOMMU_VFIO_IOAS_SET,
2076	};
2077
2078	self->fd = open("/dev/iommu", O_RDWR);
2079	ASSERT_NE(-1, self->fd);
2080
2081	/* Create what VFIO would consider a group */
2082	test_ioctl_ioas_alloc(&self->ioas_id);
2083	test_cmd_mock_domain(self->ioas_id, NULL, NULL, NULL);
2084
2085	/* Attach it to the vfio compat */
2086	vfio_ioas_cmd.ioas_id = self->ioas_id;
2087	ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2088	ASSERT_EQ(0, ioctl(self->fd, VFIO_SET_IOMMU, variant->version));
2089}
2090
2091FIXTURE_TEARDOWN(vfio_compat_mock_domain)
2092{
2093	teardown_iommufd(self->fd, _metadata);
2094}
2095
2096FIXTURE_VARIANT_ADD(vfio_compat_mock_domain, Ver1v2)
2097{
2098	.version = VFIO_TYPE1v2_IOMMU,
2099};
2100
2101FIXTURE_VARIANT_ADD(vfio_compat_mock_domain, Ver1v0)
2102{
2103	.version = VFIO_TYPE1_IOMMU,
2104};
2105
2106TEST_F(vfio_compat_mock_domain, simple_close)
2107{
2108}
2109
2110TEST_F(vfio_compat_mock_domain, option_huge_pages)
2111{
2112	struct iommu_option cmd = {
2113		.size = sizeof(cmd),
2114		.option_id = IOMMU_OPTION_HUGE_PAGES,
2115		.op = IOMMU_OPTION_OP_GET,
2116		.val64 = 3,
2117		.object_id = self->ioas_id,
2118	};
2119
2120	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
2121	if (variant->version == VFIO_TYPE1_IOMMU) {
2122		ASSERT_EQ(0, cmd.val64);
2123	} else {
2124		ASSERT_EQ(1, cmd.val64);
2125	}
2126}
2127
2128/*
2129 * Execute an ioctl command stored in buffer and check that the result does not
2130 * overflow memory.
2131 */
2132static bool is_filled(const void *buf, uint8_t c, size_t len)
2133{
2134	const uint8_t *cbuf = buf;
2135
2136	for (; len; cbuf++, len--)
2137		if (*cbuf != c)
2138			return false;
2139	return true;
2140}
2141
2142#define ioctl_check_buf(fd, cmd)                                         \
2143	({                                                               \
2144		size_t _cmd_len = *(__u32 *)buffer;                      \
2145									 \
2146		memset(buffer + _cmd_len, 0xAA, BUFFER_SIZE - _cmd_len); \
2147		ASSERT_EQ(0, ioctl(fd, cmd, buffer));                    \
2148		ASSERT_EQ(true, is_filled(buffer + _cmd_len, 0xAA,       \
2149					  BUFFER_SIZE - _cmd_len));      \
2150	})
2151
2152static void check_vfio_info_cap_chain(struct __test_metadata *_metadata,
2153				      struct vfio_iommu_type1_info *info_cmd)
2154{
2155	const struct vfio_info_cap_header *cap;
2156
2157	ASSERT_GE(info_cmd->argsz, info_cmd->cap_offset + sizeof(*cap));
2158	cap = buffer + info_cmd->cap_offset;
2159	while (true) {
2160		size_t cap_size;
2161
2162		if (cap->next)
2163			cap_size = (buffer + cap->next) - (void *)cap;
2164		else
2165			cap_size = (buffer + info_cmd->argsz) - (void *)cap;
2166
2167		switch (cap->id) {
2168		case VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE: {
2169			struct vfio_iommu_type1_info_cap_iova_range *data =
2170				(void *)cap;
2171
2172			ASSERT_EQ(1, data->header.version);
2173			ASSERT_EQ(1, data->nr_iovas);
2174			EXPECT_EQ(MOCK_APERTURE_START,
2175				  data->iova_ranges[0].start);
2176			EXPECT_EQ(MOCK_APERTURE_LAST, data->iova_ranges[0].end);
2177			break;
2178		}
2179		case VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL: {
2180			struct vfio_iommu_type1_info_dma_avail *data =
2181				(void *)cap;
2182
2183			ASSERT_EQ(1, data->header.version);
2184			ASSERT_EQ(sizeof(*data), cap_size);
2185			break;
2186		}
2187		default:
2188			ASSERT_EQ(false, true);
2189			break;
2190		}
2191		if (!cap->next)
2192			break;
2193
2194		ASSERT_GE(info_cmd->argsz, cap->next + sizeof(*cap));
2195		ASSERT_GE(buffer + cap->next, (void *)cap);
2196		cap = buffer + cap->next;
2197	}
2198}
2199
2200TEST_F(vfio_compat_mock_domain, get_info)
2201{
2202	struct vfio_iommu_type1_info *info_cmd = buffer;
2203	unsigned int i;
2204	size_t caplen;
2205
2206	/* Pre-cap ABI */
2207	*info_cmd = (struct vfio_iommu_type1_info){
2208		.argsz = offsetof(struct vfio_iommu_type1_info, cap_offset),
2209	};
2210	ioctl_check_buf(self->fd, VFIO_IOMMU_GET_INFO);
2211	ASSERT_NE(0, info_cmd->iova_pgsizes);
2212	ASSERT_EQ(VFIO_IOMMU_INFO_PGSIZES | VFIO_IOMMU_INFO_CAPS,
2213		  info_cmd->flags);
2214
2215	/* Read the cap chain size */
2216	*info_cmd = (struct vfio_iommu_type1_info){
2217		.argsz = sizeof(*info_cmd),
2218	};
2219	ioctl_check_buf(self->fd, VFIO_IOMMU_GET_INFO);
2220	ASSERT_NE(0, info_cmd->iova_pgsizes);
2221	ASSERT_EQ(VFIO_IOMMU_INFO_PGSIZES | VFIO_IOMMU_INFO_CAPS,
2222		  info_cmd->flags);
2223	ASSERT_EQ(0, info_cmd->cap_offset);
2224	ASSERT_LT(sizeof(*info_cmd), info_cmd->argsz);
2225
2226	/* Read the caps, kernel should never create a corrupted caps */
2227	caplen = info_cmd->argsz;
2228	for (i = sizeof(*info_cmd); i < caplen; i++) {
2229		*info_cmd = (struct vfio_iommu_type1_info){
2230			.argsz = i,
2231		};
2232		ioctl_check_buf(self->fd, VFIO_IOMMU_GET_INFO);
2233		ASSERT_EQ(VFIO_IOMMU_INFO_PGSIZES | VFIO_IOMMU_INFO_CAPS,
2234			  info_cmd->flags);
2235		if (!info_cmd->cap_offset)
2236			continue;
2237		check_vfio_info_cap_chain(_metadata, info_cmd);
2238	}
2239}
2240
2241static void shuffle_array(unsigned long *array, size_t nelms)
2242{
2243	unsigned int i;
2244
2245	/* Shuffle */
2246	for (i = 0; i != nelms; i++) {
2247		unsigned long tmp = array[i];
2248		unsigned int other = rand() % (nelms - i);
2249
2250		array[i] = array[other];
2251		array[other] = tmp;
2252	}
2253}
2254
2255TEST_F(vfio_compat_mock_domain, map)
2256{
2257	struct vfio_iommu_type1_dma_map map_cmd = {
2258		.argsz = sizeof(map_cmd),
2259		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
2260		.vaddr = (uintptr_t)buffer,
2261		.size = BUFFER_SIZE,
2262		.iova = MOCK_APERTURE_START,
2263	};
2264	struct vfio_iommu_type1_dma_unmap unmap_cmd = {
2265		.argsz = sizeof(unmap_cmd),
2266		.size = BUFFER_SIZE,
2267		.iova = MOCK_APERTURE_START,
2268	};
2269	unsigned long pages_iova[BUFFER_SIZE / PAGE_SIZE];
2270	unsigned int i;
2271
2272	/* Simple map/unmap */
2273	ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
2274	ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
2275	ASSERT_EQ(BUFFER_SIZE, unmap_cmd.size);
2276
2277	/* UNMAP_FLAG_ALL requires 0 iova/size */
2278	ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
2279	unmap_cmd.flags = VFIO_DMA_UNMAP_FLAG_ALL;
2280	EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
2281
2282	unmap_cmd.iova = 0;
2283	unmap_cmd.size = 0;
2284	ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
2285	ASSERT_EQ(BUFFER_SIZE, unmap_cmd.size);
2286
2287	/* Small pages */
2288	for (i = 0; i != ARRAY_SIZE(pages_iova); i++) {
2289		map_cmd.iova = pages_iova[i] =
2290			MOCK_APERTURE_START + i * PAGE_SIZE;
2291		map_cmd.vaddr = (uintptr_t)buffer + i * PAGE_SIZE;
2292		map_cmd.size = PAGE_SIZE;
2293		ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
2294	}
2295	shuffle_array(pages_iova, ARRAY_SIZE(pages_iova));
2296
2297	unmap_cmd.flags = 0;
2298	unmap_cmd.size = PAGE_SIZE;
2299	for (i = 0; i != ARRAY_SIZE(pages_iova); i++) {
2300		unmap_cmd.iova = pages_iova[i];
2301		ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
2302	}
2303}
2304
2305TEST_F(vfio_compat_mock_domain, huge_map)
2306{
2307	size_t buf_size = HUGEPAGE_SIZE * 2;
2308	struct vfio_iommu_type1_dma_map map_cmd = {
2309		.argsz = sizeof(map_cmd),
2310		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
2311		.size = buf_size,
2312		.iova = MOCK_APERTURE_START,
2313	};
2314	struct vfio_iommu_type1_dma_unmap unmap_cmd = {
2315		.argsz = sizeof(unmap_cmd),
2316	};
2317	unsigned long pages_iova[16];
2318	unsigned int i;
2319	void *buf;
2320
2321	/* Test huge pages and splitting */
2322	buf = mmap(0, buf_size, PROT_READ | PROT_WRITE,
2323		   MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB | MAP_POPULATE, -1,
2324		   0);
2325	ASSERT_NE(MAP_FAILED, buf);
2326	map_cmd.vaddr = (uintptr_t)buf;
2327	ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
2328
2329	unmap_cmd.size = buf_size / ARRAY_SIZE(pages_iova);
2330	for (i = 0; i != ARRAY_SIZE(pages_iova); i++)
2331		pages_iova[i] = MOCK_APERTURE_START + (i * unmap_cmd.size);
2332	shuffle_array(pages_iova, ARRAY_SIZE(pages_iova));
2333
2334	/* type1 mode can cut up larger mappings, type1v2 always fails */
2335	for (i = 0; i != ARRAY_SIZE(pages_iova); i++) {
2336		unmap_cmd.iova = pages_iova[i];
2337		unmap_cmd.size = buf_size / ARRAY_SIZE(pages_iova);
2338		if (variant->version == VFIO_TYPE1_IOMMU) {
2339			ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA,
2340					   &unmap_cmd));
2341		} else {
2342			EXPECT_ERRNO(ENOENT,
2343				     ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA,
2344					   &unmap_cmd));
2345		}
2346	}
2347}
2348
2349TEST_HARNESS_MAIN
2350