// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
 *
 * These tests are "kernel integrity" tests. They are looking for kernel
 * WARN/OOPS/KASAN/etc splats triggered by kernel sanitizers & debugging
 * features. They do not attempt to verify that the system calls are doing
 * what they are supposed to do.
 *
 * The basic philosophy is to run a sequence of calls that will succeed and
 * then sweep every failure injection point on that call chain to look for
 * interesting things in error handling.
 *
 * If something is actually going wrong, this test is best run with:
 *  echo 1 > /proc/sys/kernel/panic_on_warn
 */
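
/*
 * The sweep is driven with the kernel's fail-nth interface (see
 * Documentation/fault-injection/fault-injection.rst): writing N > 0 to
 * /proc/self/task/<tid>/fail-nth arms the Nth injectable call made by that
 * thread to fail, and reading the file back returns "0" once the armed fault
 * has been consumed. fail_nth_next() below relies on that read-back to detect
 * when a run completed without reaching the Nth site, which ends the sweep.
 */
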
#include <fcntl.h>
#include <dirent.h>

#define __EXPORTED_HEADERS__
#include <linux/vfio.h>

#include "iommufd_utils.h"

static bool have_fault_injection;

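/* Best-effort helper for poking debugfs knobs: write @val to @fn under @dfd */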
static int writeat(int dfd, const char *fn, const char *val)
{
	size_t val_len = strlen(val);
	ssize_t res;
	int fd;

	fd = openat(dfd, fn, O_WRONLY);
	if (fd == -1)
		return -1;
	res = write(fd, val, val_len);
	assert(res == val_len);
	close(fd);
	return 0;
}

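/*
 * buffer, BUFFER_SIZE and PAGE_SIZE are globals shared with the other iommufd
 * selftests (pulled in via iommufd_utils.h). Constructors run before main(),
 * so every test below sees a 2MiB shared anonymous mapping. mmap() is assumed
 * to succeed here; a MAP_FAILED result would surface as test failures later
 * rather than being reported directly.
 */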
static __attribute__((constructor)) void setup_buffer(void)
{
	PAGE_SIZE = sysconf(_SC_PAGE_SIZE);

	BUFFER_SIZE = 2 * 1024 * 1024;

	buffer = mmap(0, BUFFER_SIZE, PROT_READ | PROT_WRITE,
		      MAP_SHARED | MAP_ANONYMOUS, -1, 0);
}

/*
 * This sets up fault injection in a way that is useful for this test.
 * It does not attempt to restore things back to how they were.
 */
static __attribute__((constructor)) void setup_fault_injection(void)
{
	DIR *debugfs = opendir("/sys/kernel/debug/");
	struct dirent *dent;

	if (!debugfs)
		return;

	/* Allow any allocation call to be fault injected */
	if (writeat(dirfd(debugfs), "failslab/ignore-gfp-wait", "N"))
		return;
	writeat(dirfd(debugfs), "fail_page_alloc/ignore-gfp-wait", "N");
	writeat(dirfd(debugfs), "fail_page_alloc/ignore-gfp-highmem", "N");

	while ((dent = readdir(debugfs))) {
		char fn[300];

		if (strncmp(dent->d_name, "fail", 4) != 0)
			continue;

		/* We are looking for kernel splats, quiet down the log */
		snprintf(fn, sizeof(fn), "%s/verbose", dent->d_name);
		writeat(dirfd(debugfs), fn, "0");
	}
	closedir(debugfs);
	have_fault_injection = true;
}

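/*
 * State for one fail-nth sweep: proc_fd is the open
 * /proc/self/task/<tid>/fail-nth file and iteration is the N armed for the
 * current run, with 0 meaning the baseline pass that runs with no fault
 * armed.
 */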
struct fail_nth_state {
	int proc_fd;
	unsigned int iteration;
};

static void fail_nth_first(struct __test_metadata *_metadata,
			   struct fail_nth_state *nth_state)
{
	char buf[300];

	snprintf(buf, sizeof(buf), "/proc/self/task/%u/fail-nth", getpid());
	nth_state->proc_fd = open(buf, O_RDWR);
	ASSERT_NE(-1, nth_state->proc_fd);
}

static bool fail_nth_next(struct __test_metadata *_metadata,
			  struct fail_nth_state *nth_state,
			  int test_result)
{
	static const char disable_nth[] = "0";
	char buf[300];

	/*
	 * This is just an arbitrary limit based on the current kernel
	 * situation. Changes in the kernel can dramatically change the number
	 * of required fault injection sites, so if this hits it doesn't
	 * necessarily mean a test failure, just that the limit has to be made
	 * bigger.
	 */
	ASSERT_GT(400, nth_state->iteration);
	if (nth_state->iteration != 0) {
		ssize_t res;
		ssize_t res2;

		buf[0] = 0;
		/*
		 * Annoyingly, disabling the nth can also fail. This means
		 * the test passed without triggering failure.
		 */
		res = pread(nth_state->proc_fd, buf, sizeof(buf), 0);
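		/*
		 * An EFAULT here most likely means the armed fault fired on
		 * this pread() itself (failing the proc handler's user copy)
		 * rather than inside the test body, so synthesize a non-zero
		 * read-back meaning "not consumed by the test".
		 */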
		if (res == -1 && errno == EFAULT) {
			buf[0] = '1';
			buf[1] = '\n';
			res = 2;
		}

		res2 = pwrite(nth_state->proc_fd, disable_nth,
			      ARRAY_SIZE(disable_nth) - 1, 0);
		if (res2 == -1 && errno == EFAULT) {
			res2 = pwrite(nth_state->proc_fd, disable_nth,
				      ARRAY_SIZE(disable_nth) - 1, 0);
			buf[0] = '1';
			buf[1] = '\n';
		}
		ASSERT_EQ(ARRAY_SIZE(disable_nth) - 1, res2);

		/* printf("  nth %u result=%d nth=%u\n", nth_state->iteration,
		       test_result, atoi(buf)); */
		fflush(stdout);
		ASSERT_LT(1, res);
		if (res != 2 || buf[0] != '0' || buf[1] != '\n')
			return false;
	} else {
		/* printf("  nth %u result=%d\n", nth_state->iteration,
		       test_result); */
	}
	nth_state->iteration++;
	return true;
}

/*
 * This is called during the test to start failure injection. It allows the
 * test to do some setup that has already been swept and thus reduces the
 * required iterations.
 */
void __fail_nth_enable(struct __test_metadata *_metadata,
		       struct fail_nth_state *nth_state)
{
	char buf[300];
	size_t len;

	if (!nth_state->iteration)
		return;

	len = snprintf(buf, sizeof(buf), "%u", nth_state->iteration);
	ASSERT_EQ(len, pwrite(nth_state->proc_fd, buf, len, 0));
}
#define fail_nth_enable() __fail_nth_enable(_metadata, _nth_state)
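
/*
 * Anything a test body runs before fail_nth_enable() is never fault
 * injected; only the calls after it are swept:
 *
 *	self->fd = open("/dev/iommu", O_RDWR);	(setup, not swept)
 *	fail_nth_enable();
 *	... everything from here on is fault injected ...
 */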

#define TEST_FAIL_NTH(fixture_name, name)                                    \
	static int test_nth_##name(struct __test_metadata *_metadata,        \
				   FIXTURE_DATA(fixture_name) *self,          \
				   const FIXTURE_VARIANT(fixture_name)        \
					   *variant,                          \
				   struct fail_nth_state *_nth_state);        \
	TEST_F(fixture_name, name)                                            \
	{                                                                     \
		struct fail_nth_state nth_state = {};                         \
		int test_result = 0;                                          \
									      \
		if (!have_fault_injection)                                    \
			SKIP(return,                                          \
			     "fault injection is not enabled in the kernel"); \
		fail_nth_first(_metadata, &nth_state);                        \
		ASSERT_EQ(0, test_nth_##name(_metadata, self, variant,        \
					     &nth_state));                    \
		while (fail_nth_next(_metadata, &nth_state, test_result)) {  \
			fixture_name##_teardown(_metadata, self, variant);    \
			fixture_name##_setup(_metadata, self, variant);       \
			test_result = test_nth_##name(_metadata, self,        \
						      variant, &nth_state);   \
		};                                                            \
		ASSERT_EQ(0, test_result);                                    \
	}                                                                     \
	static int test_nth_##name(                                           \
		struct __test_metadata __attribute__((unused)) *_metadata,    \
		FIXTURE_DATA(fixture_name) __attribute__((unused)) *self,     \
		const FIXTURE_VARIANT(fixture_name) __attribute__((unused))   \
			*variant,                                             \
		struct fail_nth_state *_nth_state)
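
/*
 * TEST_FAIL_NTH() generates a TEST_F() whose body re-runs the real test
 * function in a loop: iteration 0 is the clean baseline, then each pass arms
 * fail-nth for the next injection site, doing a full fixture teardown/setup
 * between runs. The loop stops once a run completes without consuming the
 * armed fault, and that final run must have returned success.
 */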

FIXTURE(basic_fail_nth)
{
	int fd;
	uint32_t access_id;
};

FIXTURE_SETUP(basic_fail_nth)
{
	self->fd = -1;
	self->access_id = 0;
}

FIXTURE_TEARDOWN(basic_fail_nth)
{
	int rc;

	if (self->access_id) {
		/* The access FD holds the iommufd open until it closes */
		rc = _test_cmd_destroy_access(self->access_id);
		assert(rc == 0);
	}
	teardown_iommufd(self->fd, _metadata);
}

/* Cover ioas.c */
TEST_FAIL_NTH(basic_fail_nth, basic)
{
	struct iommu_iova_range ranges[10];
	uint32_t ioas_id;
	__u64 iova;

	fail_nth_enable();

	self->fd = open("/dev/iommu", O_RDWR);
	if (self->fd == -1)
		return -1;

	if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
		return -1;

	{
		struct iommu_ioas_iova_ranges ranges_cmd = {
			.size = sizeof(ranges_cmd),
			.num_iovas = ARRAY_SIZE(ranges),
			.ioas_id = ioas_id,
			.allowed_iovas = (uintptr_t)ranges,
		};
		if (ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd))
			return -1;
	}

	{
		struct iommu_ioas_allow_iovas allow_cmd = {
			.size = sizeof(allow_cmd),
			.ioas_id = ioas_id,
			.num_iovas = 1,
			.allowed_iovas = (uintptr_t)ranges,
		};

		ranges[0].start = 16 * 1024;
		ranges[0].last = BUFFER_SIZE + 16 * 1024 * 600 - 1;
		if (ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd))
			return -1;
	}

	if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, BUFFER_SIZE, &iova,
				 IOMMU_IOAS_MAP_WRITEABLE |
					 IOMMU_IOAS_MAP_READABLE))
		return -1;

	{
		struct iommu_ioas_copy copy_cmd = {
			.size = sizeof(copy_cmd),
			.flags = IOMMU_IOAS_MAP_WRITEABLE |
				 IOMMU_IOAS_MAP_READABLE,
			.dst_ioas_id = ioas_id,
			.src_ioas_id = ioas_id,
			.src_iova = iova,
			.length = sizeof(ranges),
		};

		if (ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd))
			return -1;
	}

	if (_test_ioctl_ioas_unmap(self->fd, ioas_id, iova, BUFFER_SIZE,
				   NULL))
		return -1;
	/* Failure path of no IOVA to unmap */
	_test_ioctl_ioas_unmap(self->fd, ioas_id, iova, BUFFER_SIZE, NULL);
	return 0;
}

/* iopt_area_fill_domains() and iopt_area_fill_domain() */
TEST_FAIL_NTH(basic_fail_nth, map_domain)
{
	uint32_t ioas_id;
	__u32 stdev_id;
	__u32 hwpt_id;
	__u64 iova;

	self->fd = open("/dev/iommu", O_RDWR);
	if (self->fd == -1)
		return -1;

	if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
		return -1;

	if (_test_ioctl_set_temp_memory_limit(self->fd, 32))
		return -1;

	fail_nth_enable();

	if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id, NULL))
		return -1;

	if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, 262144, &iova,
				 IOMMU_IOAS_MAP_WRITEABLE |
					 IOMMU_IOAS_MAP_READABLE))
		return -1;

	if (_test_ioctl_destroy(self->fd, stdev_id))
		return -1;

	if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id, NULL))
		return -1;
	return 0;
}

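/* Like map_domain, but the fill is swept with two mock domains attached */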
TEST_FAIL_NTH(basic_fail_nth, map_two_domains)
{
	uint32_t ioas_id;
	__u32 stdev_id2;
	__u32 stdev_id;
	__u32 hwpt_id2;
	__u32 hwpt_id;
	__u64 iova;

	self->fd = open("/dev/iommu", O_RDWR);
	if (self->fd == -1)
		return -1;

	if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
		return -1;

	if (_test_ioctl_set_temp_memory_limit(self->fd, 32))
		return -1;

	if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id, NULL))
		return -1;

	fail_nth_enable();

	if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id2, &hwpt_id2,
				  NULL))
		return -1;

	if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, 262144, &iova,
				 IOMMU_IOAS_MAP_WRITEABLE |
					 IOMMU_IOAS_MAP_READABLE))
		return -1;

	if (_test_ioctl_destroy(self->fd, stdev_id))
		return -1;

	if (_test_ioctl_destroy(self->fd, stdev_id2))
		return -1;

	if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id, NULL))
		return -1;
	if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id2, &hwpt_id2,
				  NULL))
		return -1;
	return 0;
}

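/* Covers the emulated access read/write paths (iommufd_access_rw()) */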
TEST_FAIL_NTH(basic_fail_nth, access_rw)
{
	uint64_t tmp_big[4096];
	uint32_t ioas_id;
	uint16_t tmp[32];
	__u64 iova;

	self->fd = open("/dev/iommu", O_RDWR);
	if (self->fd == -1)
		return -1;

	if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
		return -1;

	if (_test_ioctl_set_temp_memory_limit(self->fd, 32))
		return -1;

	if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, 262144, &iova,
				 IOMMU_IOAS_MAP_WRITEABLE |
					 IOMMU_IOAS_MAP_READABLE))
		return -1;

	fail_nth_enable();

	if (_test_cmd_create_access(self->fd, ioas_id, &self->access_id, 0))
		return -1;

	{
		struct iommu_test_cmd access_cmd = {
			.size = sizeof(access_cmd),
			.op = IOMMU_TEST_OP_ACCESS_RW,
			.id = self->access_id,
			.access_rw = { .iova = iova,
				       .length = sizeof(tmp),
				       .uptr = (uintptr_t)tmp },
		};

		/* READ; the variants below cover write and the slow path */
		if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
			  &access_cmd))
			return -1;

		access_cmd.access_rw.flags = MOCK_ACCESS_RW_WRITE;
		if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
			  &access_cmd))
			return -1;

		access_cmd.access_rw.flags = MOCK_ACCESS_RW_SLOW_PATH;
		if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
			  &access_cmd))
			return -1;
		access_cmd.access_rw.flags = MOCK_ACCESS_RW_SLOW_PATH |
					     MOCK_ACCESS_RW_WRITE;
		if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
			  &access_cmd))
			return -1;
	}

	{
		struct iommu_test_cmd access_cmd = {
			.size = sizeof(access_cmd),
			.op = IOMMU_TEST_OP_ACCESS_RW,
			.id = self->access_id,
			.access_rw = { .iova = iova,
				       .flags = MOCK_ACCESS_RW_SLOW_PATH,
				       .length = sizeof(tmp_big),
				       .uptr = (uintptr_t)tmp_big },
		};

		if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
			  &access_cmd))
			return -1;
	}
	if (_test_cmd_destroy_access(self->access_id))
		return -1;
	self->access_id = 0;
	return 0;
}

/* pages.c access functions */
TEST_FAIL_NTH(basic_fail_nth, access_pin)
{
	uint32_t access_pages_id;
	uint32_t ioas_id;
	__u64 iova;

	self->fd = open("/dev/iommu", O_RDWR);
	if (self->fd == -1)
		return -1;

	if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
		return -1;

	if (_test_ioctl_set_temp_memory_limit(self->fd, 32))
		return -1;

	if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, BUFFER_SIZE, &iova,
				 IOMMU_IOAS_MAP_WRITEABLE |
					 IOMMU_IOAS_MAP_READABLE))
		return -1;

	if (_test_cmd_create_access(self->fd, ioas_id, &self->access_id,
				    MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES))
		return -1;

	fail_nth_enable();

	{
		struct iommu_test_cmd access_cmd = {
			.size = sizeof(access_cmd),
			.op = IOMMU_TEST_OP_ACCESS_PAGES,
			.id = self->access_id,
			.access_pages = { .iova = iova,
					  .length = BUFFER_SIZE,
					  .uptr = (uintptr_t)buffer },
		};

		if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
			  &access_cmd))
			return -1;
		access_pages_id = access_cmd.access_pages.out_access_pages_id;
	}

	if (_test_cmd_destroy_access_pages(self->fd, self->access_id,
					   access_pages_id))
		return -1;

	if (_test_cmd_destroy_access(self->access_id))
		return -1;
	self->access_id = 0;
	return 0;
}

/* iopt_pages_fill_xarray() */
TEST_FAIL_NTH(basic_fail_nth, access_pin_domain)
{
	uint32_t access_pages_id;
	uint32_t ioas_id;
	__u32 stdev_id;
	__u32 hwpt_id;
	__u64 iova;

	self->fd = open("/dev/iommu", O_RDWR);
	if (self->fd == -1)
		return -1;

	if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
		return -1;

	if (_test_ioctl_set_temp_memory_limit(self->fd, 32))
		return -1;

	if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id, NULL))
		return -1;

	if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, BUFFER_SIZE, &iova,
				 IOMMU_IOAS_MAP_WRITEABLE |
					 IOMMU_IOAS_MAP_READABLE))
		return -1;

	if (_test_cmd_create_access(self->fd, ioas_id, &self->access_id,
				    MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES))
		return -1;

	fail_nth_enable();

	{
		struct iommu_test_cmd access_cmd = {
			.size = sizeof(access_cmd),
			.op = IOMMU_TEST_OP_ACCESS_PAGES,
			.id = self->access_id,
			.access_pages = { .iova = iova,
					  .length = BUFFER_SIZE,
					  .uptr = (uintptr_t)buffer },
		};

		if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
			  &access_cmd))
			return -1;
		access_pages_id = access_cmd.access_pages.out_access_pages_id;
	}

	if (_test_cmd_destroy_access_pages(self->fd, self->access_id,
					   access_pages_id))
		return -1;

	if (_test_cmd_destroy_access(self->access_id))
		return -1;
	self->access_id = 0;

	if (_test_ioctl_destroy(self->fd, stdev_id))
		return -1;
	return 0;
}

/* device.c */
TEST_FAIL_NTH(basic_fail_nth, device)
{
	struct iommu_test_hw_info info;
	uint32_t ioas_id;
	uint32_t ioas_id2;
	uint32_t stdev_id;
	uint32_t idev_id;
	uint32_t hwpt_id;
	__u64 iova;

	self->fd = open("/dev/iommu", O_RDWR);
	if (self->fd == -1)
		return -1;

	if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
		return -1;

	if (_test_ioctl_ioas_alloc(self->fd, &ioas_id2))
		return -1;

	iova = MOCK_APERTURE_START;
	if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, PAGE_SIZE, &iova,
				 IOMMU_IOAS_MAP_FIXED_IOVA |
					 IOMMU_IOAS_MAP_WRITEABLE |
					 IOMMU_IOAS_MAP_READABLE))
		return -1;
	if (_test_ioctl_ioas_map(self->fd, ioas_id2, buffer, PAGE_SIZE, &iova,
				 IOMMU_IOAS_MAP_FIXED_IOVA |
					 IOMMU_IOAS_MAP_WRITEABLE |
					 IOMMU_IOAS_MAP_READABLE))
		return -1;

	fail_nth_enable();

	if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, NULL,
				  &idev_id))
		return -1;

	if (_test_cmd_get_hw_info(self->fd, idev_id, &info, sizeof(info), NULL))
		return -1;

	if (_test_cmd_hwpt_alloc(self->fd, idev_id, ioas_id, 0, &hwpt_id,
				 IOMMU_HWPT_DATA_NONE, 0, 0))
		return -1;

	if (_test_cmd_mock_domain_replace(self->fd, stdev_id, ioas_id2, NULL))
		return -1;

	if (_test_cmd_mock_domain_replace(self->fd, stdev_id, hwpt_id, NULL))
		return -1;
	return 0;
}

TEST_HARNESS_MAIN