1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES */
3 #include <stdlib.h>
4 #include <sys/mman.h>
5 #include <sys/eventfd.h>
6 
7 #define __EXPORTED_HEADERS__
8 #include <linux/vfio.h>
9 
10 #include "iommufd_utils.h"
11 
12 static unsigned long HUGEPAGE_SIZE;
13 
14 #define MOCK_PAGE_SIZE (PAGE_SIZE / 2)
15 #define MOCK_HUGE_PAGE_SIZE (512 * MOCK_PAGE_SIZE)
16 
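/*
 * Read the transparent hugepage PMD size from sysfs; fall back to 2MiB if the
 * file is unavailable or unreadable.
 */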
17 static unsigned long get_huge_page_size(void)
18 {
19 	char buf[80];
20 	int ret;
21 	int fd;
22 
23 	fd = open("/sys/kernel/mm/transparent_hugepage/hpage_pmd_size",
24 		  O_RDONLY);
25 	if (fd < 0)
26 		return 2 * 1024 * 1024;
27 
28 	ret = read(fd, buf, sizeof(buf));
29 	close(fd);
30 	if (ret <= 0 || ret == sizeof(buf))
31 		return 2 * 1024 * 1024;
32 	buf[ret] = 0;
33 	return strtoul(buf, NULL, 10);
34 }
35 
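/*
 * Constructor that runs before the harness: pick up the runtime page and huge
 * page sizes and back the global test buffer with a hugepage-aligned,
 * MAP_SHARED anonymous mapping.
 */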
36 static __attribute__((constructor)) void setup_sizes(void)
37 {
38 	void *vrc;
39 	int rc;
40 
41 	PAGE_SIZE = sysconf(_SC_PAGE_SIZE);
42 	HUGEPAGE_SIZE = get_huge_page_size();
43 
44 	BUFFER_SIZE = PAGE_SIZE * 16;
45 	rc = posix_memalign(&buffer, HUGEPAGE_SIZE, BUFFER_SIZE);
46 	assert(!rc);
47 	assert(buffer);
48 	assert((uintptr_t)buffer % HUGEPAGE_SIZE == 0);
49 	vrc = mmap(buffer, BUFFER_SIZE, PROT_READ | PROT_WRITE,
50 		   MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
51 	assert(vrc == buffer);
52 }
53 
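/* Bare fixture: just an open /dev/iommu file descriptor, no other objects. */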
54 FIXTURE(iommufd)
55 {
56 	int fd;
57 };
58 
59 FIXTURE_SETUP(iommufd)
60 {
61 	self->fd = open("/dev/iommu", O_RDWR);
62 	ASSERT_NE(-1, self->fd);
63 }
64 
65 FIXTURE_TEARDOWN(iommufd)
66 {
67 	teardown_iommufd(self->fd, _metadata);
68 }
69 
70 TEST_F(iommufd, simple_close)
71 {
72 }
73 
74 TEST_F(iommufd, cmd_fail)
75 {
76 	struct iommu_destroy cmd = { .size = sizeof(cmd), .id = 0 };
77 
78 	/* object id is invalid */
79 	EXPECT_ERRNO(ENOENT, _test_ioctl_destroy(self->fd, 0));
80 	/* Bad pointer */
81 	EXPECT_ERRNO(EFAULT, ioctl(self->fd, IOMMU_DESTROY, NULL));
82 	/* Unknown ioctl */
83 	EXPECT_ERRNO(ENOTTY,
84 		     ioctl(self->fd, _IO(IOMMUFD_TYPE, IOMMUFD_CMD_BASE - 1),
85 			   &cmd));
86 }
87 
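/*
 * For every ioctl, verify the size-based ABI rules: a size smaller than the
 * last documented field is EINVAL, a larger size with non-zero trailing bytes
 * is E2BIG, and a larger size with zeroed trailing bytes behaves the same as
 * the exact size.
 */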
88 TEST_F(iommufd, cmd_length)
89 {
90 #define TEST_LENGTH(_struct, _ioctl, _last)                              \
91 	{                                                                \
92 		size_t min_size = offsetofend(struct _struct, _last);    \
93 		struct {                                                 \
94 			struct _struct cmd;                              \
95 			uint8_t extra;                                   \
96 		} cmd = { .cmd = { .size = min_size - 1 },               \
97 			  .extra = UINT8_MAX };                          \
98 		int old_errno;                                           \
99 		int rc;                                                  \
100 									 \
101 		EXPECT_ERRNO(EINVAL, ioctl(self->fd, _ioctl, &cmd));     \
102 		cmd.cmd.size = sizeof(struct _struct) + 1;               \
103 		EXPECT_ERRNO(E2BIG, ioctl(self->fd, _ioctl, &cmd));      \
104 		cmd.cmd.size = sizeof(struct _struct);                   \
105 		rc = ioctl(self->fd, _ioctl, &cmd);                      \
106 		old_errno = errno;                                       \
107 		cmd.cmd.size = sizeof(struct _struct) + 1;               \
108 		cmd.extra = 0;                                           \
109 		if (rc) {                                                \
110 			EXPECT_ERRNO(old_errno,                          \
111 				     ioctl(self->fd, _ioctl, &cmd));     \
112 		} else {                                                 \
113 			ASSERT_EQ(0, ioctl(self->fd, _ioctl, &cmd));     \
114 		}                                                        \
115 	}
116 
117 	TEST_LENGTH(iommu_destroy, IOMMU_DESTROY, id);
118 	TEST_LENGTH(iommu_hw_info, IOMMU_GET_HW_INFO, __reserved);
119 	TEST_LENGTH(iommu_hwpt_alloc, IOMMU_HWPT_ALLOC, __reserved);
120 	TEST_LENGTH(iommu_hwpt_invalidate, IOMMU_HWPT_INVALIDATE, __reserved);
121 	TEST_LENGTH(iommu_ioas_alloc, IOMMU_IOAS_ALLOC, out_ioas_id);
122 	TEST_LENGTH(iommu_ioas_iova_ranges, IOMMU_IOAS_IOVA_RANGES,
123 		    out_iova_alignment);
124 	TEST_LENGTH(iommu_ioas_allow_iovas, IOMMU_IOAS_ALLOW_IOVAS,
125 		    allowed_iovas);
126 	TEST_LENGTH(iommu_ioas_map, IOMMU_IOAS_MAP, iova);
127 	TEST_LENGTH(iommu_ioas_copy, IOMMU_IOAS_COPY, src_iova);
128 	TEST_LENGTH(iommu_ioas_unmap, IOMMU_IOAS_UNMAP, length);
129 	TEST_LENGTH(iommu_option, IOMMU_OPTION, val64);
130 	TEST_LENGTH(iommu_vfio_ioas, IOMMU_VFIO_IOAS, __reserved);
131 #undef TEST_LENGTH
132 }
133 
134 TEST_F(iommufd, cmd_ex_fail)
135 {
136 	struct {
137 		struct iommu_destroy cmd;
138 		__u64 future;
139 	} cmd = { .cmd = { .size = sizeof(cmd), .id = 0 } };
140 
141 	/* object id is invalid and command is longer */
142 	EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_DESTROY, &cmd));
143 	/* future area is non-zero */
144 	cmd.future = 1;
145 	EXPECT_ERRNO(E2BIG, ioctl(self->fd, IOMMU_DESTROY, &cmd));
146 	/* Original command "works" */
147 	cmd.cmd.size = sizeof(cmd.cmd);
148 	EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_DESTROY, &cmd));
149 	/* Short command fails */
150 	cmd.cmd.size = sizeof(cmd.cmd) - 1;
151 	EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_DESTROY, &cmd));
152 }
153 
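/*
 * IOMMU_OPTION_RLIMIT_MODE is a global option: GET reports the current mode,
 * SET accepts only 0 or 1 and requires privilege, and the per-object
 * HUGE_PAGES option id fails with ENOENT when no object id is supplied.
 */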
154 TEST_F(iommufd, global_options)
155 {
156 	struct iommu_option cmd = {
157 		.size = sizeof(cmd),
158 		.option_id = IOMMU_OPTION_RLIMIT_MODE,
159 		.op = IOMMU_OPTION_OP_GET,
160 		.val64 = 1,
161 	};
162 
163 	cmd.option_id = IOMMU_OPTION_RLIMIT_MODE;
164 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
165 	ASSERT_EQ(0, cmd.val64);
166 
167 	/* This requires root */
168 	cmd.op = IOMMU_OPTION_OP_SET;
169 	cmd.val64 = 1;
170 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
171 	cmd.val64 = 2;
172 	EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_OPTION, &cmd));
173 
174 	cmd.op = IOMMU_OPTION_OP_GET;
175 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
176 	ASSERT_EQ(1, cmd.val64);
177 
178 	cmd.op = IOMMU_OPTION_OP_SET;
179 	cmd.val64 = 0;
180 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
181 
182 	cmd.op = IOMMU_OPTION_OP_GET;
183 	cmd.option_id = IOMMU_OPTION_HUGE_PAGES;
184 	EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_OPTION, &cmd));
185 	cmd.op = IOMMU_OPTION_OP_SET;
186 	EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_OPTION, &cmd));
187 }
188 
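/*
 * Fixture with an allocated IOAS. Variants control how many mock domains are
 * attached and whether a temporary memory limit is applied; when a mock
 * domain exists, base_iova points at the start of the mock aperture.
 */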
189 FIXTURE(iommufd_ioas)
190 {
191 	int fd;
192 	uint32_t ioas_id;
193 	uint32_t stdev_id;
194 	uint32_t hwpt_id;
195 	uint32_t device_id;
196 	uint64_t base_iova;
197 };
198 
199 FIXTURE_VARIANT(iommufd_ioas)
200 {
201 	unsigned int mock_domains;
202 	unsigned int memory_limit;
203 };
204 
205 FIXTURE_SETUP(iommufd_ioas)
206 {
207 	unsigned int i;
208 
209 
210 	self->fd = open("/dev/iommu", O_RDWR);
211 	ASSERT_NE(-1, self->fd);
212 	test_ioctl_ioas_alloc(&self->ioas_id);
213 
214 	if (!variant->memory_limit) {
215 		test_ioctl_set_default_memory_limit();
216 	} else {
217 		test_ioctl_set_temp_memory_limit(variant->memory_limit);
218 	}
219 
220 	for (i = 0; i != variant->mock_domains; i++) {
221 		test_cmd_mock_domain(self->ioas_id, &self->stdev_id,
222 				     &self->hwpt_id, &self->device_id);
223 		self->base_iova = MOCK_APERTURE_START;
224 	}
225 }
226 
227 FIXTURE_TEARDOWN(iommufd_ioas)
228 {
229 	test_ioctl_set_default_memory_limit();
230 	teardown_iommufd(self->fd, _metadata);
231 }
232 
233 FIXTURE_VARIANT_ADD(iommufd_ioas, no_domain)
234 {
235 };
236 
237 FIXTURE_VARIANT_ADD(iommufd_ioas, mock_domain)
238 {
239 	.mock_domains = 1,
240 };
241 
242 FIXTURE_VARIANT_ADD(iommufd_ioas, two_mock_domain)
243 {
244 	.mock_domains = 2,
245 };
246 
247 FIXTURE_VARIANT_ADD(iommufd_ioas, mock_domain_limit)
248 {
249 	.mock_domains = 1,
250 	.memory_limit = 16,
251 };
252 
253 TEST_F(iommufd_ioas, ioas_auto_destroy)
254 {
255 }
256 
257 TEST_F(iommufd_ioas, ioas_destroy)
258 {
259 	if (self->stdev_id) {
260 		/* IOAS cannot be freed while a device has a HWPT using it */
261 		EXPECT_ERRNO(EBUSY,
262 			     _test_ioctl_destroy(self->fd, self->ioas_id));
263 	} else {
264 		/* Can allocate and manually free an IOAS table */
265 		test_ioctl_destroy(self->ioas_id);
266 	}
267 }
268 
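/*
 * Exercise nested HWPT allocation: create a NEST_PARENT hwpt, allocate two
 * nested hwpts plus an IOPF-capable one on top of it, probe the invalidation
 * ioctl's error paths, and walk through attach/replace/destroy ordering.
 */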
269 TEST_F(iommufd_ioas, alloc_hwpt_nested)
270 {
271 	const uint32_t min_data_len =
272 		offsetofend(struct iommu_hwpt_selftest, iotlb);
273 	struct iommu_hwpt_selftest data = {
274 		.iotlb = IOMMU_TEST_IOTLB_DEFAULT,
275 	};
276 	struct iommu_hwpt_invalidate_selftest inv_reqs[2] = {};
277 	uint32_t nested_hwpt_id[2] = {};
278 	uint32_t num_inv;
279 	uint32_t parent_hwpt_id = 0;
280 	uint32_t parent_hwpt_id_not_work = 0;
281 	uint32_t test_hwpt_id = 0;
282 	uint32_t iopf_hwpt_id;
283 	uint32_t fault_id;
284 	uint32_t fault_fd;
285 
286 	if (self->device_id) {
287 		/* Negative tests */
288 		test_err_hwpt_alloc(ENOENT, self->ioas_id, self->device_id, 0,
289 				    &test_hwpt_id);
290 		test_err_hwpt_alloc(EINVAL, self->device_id, self->device_id, 0,
291 				    &test_hwpt_id);
292 		test_err_hwpt_alloc(EOPNOTSUPP, self->device_id, self->ioas_id,
293 				    IOMMU_HWPT_ALLOC_NEST_PARENT |
294 						IOMMU_HWPT_FAULT_ID_VALID,
295 				    &test_hwpt_id);
296 
297 		test_cmd_hwpt_alloc(self->device_id, self->ioas_id,
298 				    IOMMU_HWPT_ALLOC_NEST_PARENT,
299 				    &parent_hwpt_id);
300 
301 		test_cmd_hwpt_alloc(self->device_id, self->ioas_id, 0,
302 				    &parent_hwpt_id_not_work);
303 
304 		/* Negative nested tests */
305 		test_err_hwpt_alloc_nested(EINVAL, self->device_id,
306 					   parent_hwpt_id, 0,
307 					   &nested_hwpt_id[0],
308 					   IOMMU_HWPT_DATA_NONE, &data,
309 					   sizeof(data));
310 		test_err_hwpt_alloc_nested(EOPNOTSUPP, self->device_id,
311 					   parent_hwpt_id, 0,
312 					   &nested_hwpt_id[0],
313 					   IOMMU_HWPT_DATA_SELFTEST + 1, &data,
314 					   sizeof(data));
315 		test_err_hwpt_alloc_nested(EINVAL, self->device_id,
316 					   parent_hwpt_id, 0,
317 					   &nested_hwpt_id[0],
318 					   IOMMU_HWPT_DATA_SELFTEST, &data,
319 					   min_data_len - 1);
320 		test_err_hwpt_alloc_nested(EFAULT, self->device_id,
321 					   parent_hwpt_id, 0,
322 					   &nested_hwpt_id[0],
323 					   IOMMU_HWPT_DATA_SELFTEST, NULL,
324 					   sizeof(data));
325 		test_err_hwpt_alloc_nested(
326 			EOPNOTSUPP, self->device_id, parent_hwpt_id,
327 			IOMMU_HWPT_ALLOC_NEST_PARENT, &nested_hwpt_id[0],
328 			IOMMU_HWPT_DATA_SELFTEST, &data, sizeof(data));
329 		test_err_hwpt_alloc_nested(EINVAL, self->device_id,
330 					   parent_hwpt_id_not_work, 0,
331 					   &nested_hwpt_id[0],
332 					   IOMMU_HWPT_DATA_SELFTEST, &data,
333 					   sizeof(data));
334 
335 		/* Allocate two nested hwpts sharing one common parent hwpt */
336 		test_ioctl_fault_alloc(&fault_id, &fault_fd);
337 		test_cmd_hwpt_alloc_nested(self->device_id, parent_hwpt_id, 0,
338 					   &nested_hwpt_id[0],
339 					   IOMMU_HWPT_DATA_SELFTEST, &data,
340 					   sizeof(data));
341 		test_cmd_hwpt_alloc_nested(self->device_id, parent_hwpt_id, 0,
342 					   &nested_hwpt_id[1],
343 					   IOMMU_HWPT_DATA_SELFTEST, &data,
344 					   sizeof(data));
345 		test_err_hwpt_alloc_iopf(ENOENT, self->device_id, parent_hwpt_id,
346 					 UINT32_MAX, IOMMU_HWPT_FAULT_ID_VALID,
347 					 &iopf_hwpt_id, IOMMU_HWPT_DATA_SELFTEST,
348 					 &data, sizeof(data));
349 		test_cmd_hwpt_alloc_iopf(self->device_id, parent_hwpt_id, fault_id,
350 					 IOMMU_HWPT_FAULT_ID_VALID, &iopf_hwpt_id,
351 					 IOMMU_HWPT_DATA_SELFTEST, &data,
352 					 sizeof(data));
353 		test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[0],
354 					      IOMMU_TEST_IOTLB_DEFAULT);
355 		test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[1],
356 					      IOMMU_TEST_IOTLB_DEFAULT);
357 
358 		/* Negative test: a nested hwpt on top of a nested hwpt */
359 		test_err_hwpt_alloc_nested(EINVAL, self->device_id,
360 					   nested_hwpt_id[0], 0, &test_hwpt_id,
361 					   IOMMU_HWPT_DATA_SELFTEST, &data,
362 					   sizeof(data));
363 		/* Negative test: parent hwpt now cannot be freed */
364 		EXPECT_ERRNO(EBUSY,
365 			     _test_ioctl_destroy(self->fd, parent_hwpt_id));
366 
367 		/* hwpt_invalidate only supports a user-managed hwpt (nested) */
368 		num_inv = 1;
369 		test_err_hwpt_invalidate(ENOENT, parent_hwpt_id, inv_reqs,
370 					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
371 					 sizeof(*inv_reqs), &num_inv);
372 		assert(!num_inv);
373 
374 		/* Check data_type by passing zero-length array */
375 		num_inv = 0;
376 		test_cmd_hwpt_invalidate(nested_hwpt_id[0], inv_reqs,
377 					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
378 					 sizeof(*inv_reqs), &num_inv);
379 		assert(!num_inv);
380 
381 		/* Negative test: Invalid data_type */
382 		num_inv = 1;
383 		test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
384 					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST_INVALID,
385 					 sizeof(*inv_reqs), &num_inv);
386 		assert(!num_inv);
387 
388 		/* Negative test: structure size sanity */
389 		num_inv = 1;
390 		test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
391 					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
392 					 sizeof(*inv_reqs) + 1, &num_inv);
393 		assert(!num_inv);
394 
395 		num_inv = 1;
396 		test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
397 					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
398 					 1, &num_inv);
399 		assert(!num_inv);
400 
401 		/* Negative test: invalid flag is passed */
402 		num_inv = 1;
403 		inv_reqs[0].flags = 0xffffffff;
404 		test_err_hwpt_invalidate(EOPNOTSUPP, nested_hwpt_id[0], inv_reqs,
405 					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
406 					 sizeof(*inv_reqs), &num_inv);
407 		assert(!num_inv);
408 
409 		/* Negative test: invalid data_uptr when array is not empty */
410 		num_inv = 1;
411 		inv_reqs[0].flags = 0;
412 		test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], NULL,
413 					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
414 					 sizeof(*inv_reqs), &num_inv);
415 		assert(!num_inv);
416 
417 		/* Negative test: invalid entry_len when array is not empty */
418 		num_inv = 1;
419 		inv_reqs[0].flags = 0;
420 		test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
421 					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
422 					 0, &num_inv);
423 		assert(!num_inv);
424 
425 		/* Negative test: invalid iotlb_id */
426 		num_inv = 1;
427 		inv_reqs[0].flags = 0;
428 		inv_reqs[0].iotlb_id = MOCK_NESTED_DOMAIN_IOTLB_ID_MAX + 1;
429 		test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
430 					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
431 					 sizeof(*inv_reqs), &num_inv);
432 		assert(!num_inv);
433 
434 		/*
435 		 * Invalidate the 1st iotlb entry but fail the 2nd request
436 		 * due to invalid flags configuration in the 2nd request.
437 		 */
438 		num_inv = 2;
439 		inv_reqs[0].flags = 0;
440 		inv_reqs[0].iotlb_id = 0;
441 		inv_reqs[1].flags = 0xffffffff;
442 		inv_reqs[1].iotlb_id = 1;
443 		test_err_hwpt_invalidate(EOPNOTSUPP, nested_hwpt_id[0], inv_reqs,
444 					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
445 					 sizeof(*inv_reqs), &num_inv);
446 		assert(num_inv == 1);
447 		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 0, 0);
448 		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 1,
449 					  IOMMU_TEST_IOTLB_DEFAULT);
450 		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 2,
451 					  IOMMU_TEST_IOTLB_DEFAULT);
452 		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 3,
453 					  IOMMU_TEST_IOTLB_DEFAULT);
454 
455 		/*
456 		 * Invalidate the 1st iotlb entry but fail the 2nd request
457 		 * due to invalid iotlb_id configuration in the 2nd request.
458 		 */
459 		num_inv = 2;
460 		inv_reqs[0].flags = 0;
461 		inv_reqs[0].iotlb_id = 0;
462 		inv_reqs[1].flags = 0;
463 		inv_reqs[1].iotlb_id = MOCK_NESTED_DOMAIN_IOTLB_ID_MAX + 1;
464 		test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
465 					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
466 					 sizeof(*inv_reqs), &num_inv);
467 		assert(num_inv == 1);
468 		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 0, 0);
469 		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 1,
470 					  IOMMU_TEST_IOTLB_DEFAULT);
471 		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 2,
472 					  IOMMU_TEST_IOTLB_DEFAULT);
473 		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 3,
474 					  IOMMU_TEST_IOTLB_DEFAULT);
475 
476 		/* Invalidate the 2nd iotlb entry and verify */
477 		num_inv = 1;
478 		inv_reqs[0].flags = 0;
479 		inv_reqs[0].iotlb_id = 1;
480 		test_cmd_hwpt_invalidate(nested_hwpt_id[0], inv_reqs,
481 					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
482 					 sizeof(*inv_reqs), &num_inv);
483 		assert(num_inv == 1);
484 		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 0, 0);
485 		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 1, 0);
486 		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 2,
487 					  IOMMU_TEST_IOTLB_DEFAULT);
488 		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 3,
489 					  IOMMU_TEST_IOTLB_DEFAULT);
490 
491 		/* Invalidate the 3rd and 4th iotlb entries and verify */
492 		num_inv = 2;
493 		inv_reqs[0].flags = 0;
494 		inv_reqs[0].iotlb_id = 2;
495 		inv_reqs[1].flags = 0;
496 		inv_reqs[1].iotlb_id = 3;
497 		test_cmd_hwpt_invalidate(nested_hwpt_id[0], inv_reqs,
498 					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
499 					 sizeof(*inv_reqs), &num_inv);
500 		assert(num_inv == 2);
501 		test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[0], 0);
502 
503 		/* Invalidate all iotlb entries for nested_hwpt_id[1] and verify */
504 		num_inv = 1;
505 		inv_reqs[0].flags = IOMMU_TEST_INVALIDATE_FLAG_ALL;
506 		test_cmd_hwpt_invalidate(nested_hwpt_id[1], inv_reqs,
507 					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
508 					 sizeof(*inv_reqs), &num_inv);
509 		assert(num_inv == 1);
510 		test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[1], 0);
511 
512 		/* Attach device to nested_hwpt_id[0] that then will be busy */
513 		test_cmd_mock_domain_replace(self->stdev_id, nested_hwpt_id[0]);
514 		EXPECT_ERRNO(EBUSY,
515 			     _test_ioctl_destroy(self->fd, nested_hwpt_id[0]));
516 
517 		/* Switch from nested_hwpt_id[0] to nested_hwpt_id[1] */
518 		test_cmd_mock_domain_replace(self->stdev_id, nested_hwpt_id[1]);
519 		EXPECT_ERRNO(EBUSY,
520 			     _test_ioctl_destroy(self->fd, nested_hwpt_id[1]));
521 		test_ioctl_destroy(nested_hwpt_id[0]);
522 
523 		/* Switch from nested_hwpt_id[1] to iopf_hwpt_id */
524 		test_cmd_mock_domain_replace(self->stdev_id, iopf_hwpt_id);
525 		EXPECT_ERRNO(EBUSY,
526 			     _test_ioctl_destroy(self->fd, iopf_hwpt_id));
527 		/* Trigger an IOPF on the device */
528 		test_cmd_trigger_iopf(self->device_id, fault_fd);
529 
530 		/* Detach from nested_hwpt_id[1] and destroy it */
531 		test_cmd_mock_domain_replace(self->stdev_id, parent_hwpt_id);
532 		test_ioctl_destroy(nested_hwpt_id[1]);
533 		test_ioctl_destroy(iopf_hwpt_id);
534 
535 		/* Detach from the parent hw_pagetable and destroy it */
536 		test_cmd_mock_domain_replace(self->stdev_id, self->ioas_id);
537 		test_ioctl_destroy(parent_hwpt_id);
538 		test_ioctl_destroy(parent_hwpt_id_not_work);
539 		close(fault_fd);
540 		test_ioctl_destroy(fault_id);
541 	} else {
542 		test_err_hwpt_alloc(ENOENT, self->device_id, self->ioas_id, 0,
543 				    &parent_hwpt_id);
544 		test_err_hwpt_alloc_nested(ENOENT, self->device_id,
545 					   parent_hwpt_id, 0,
546 					   &nested_hwpt_id[0],
547 					   IOMMU_HWPT_DATA_SELFTEST, &data,
548 					   sizeof(data));
549 		test_err_hwpt_alloc_nested(ENOENT, self->device_id,
550 					   parent_hwpt_id, 0,
551 					   &nested_hwpt_id[1],
552 					   IOMMU_HWPT_DATA_SELFTEST, &data,
553 					   sizeof(data));
554 		test_err_mock_domain_replace(ENOENT, self->stdev_id,
555 					     nested_hwpt_id[0]);
556 		test_err_mock_domain_replace(ENOENT, self->stdev_id,
557 					     nested_hwpt_id[1]);
558 	}
559 }
560 
561 TEST_F(iommufd_ioas, hwpt_attach)
562 {
563 	/* Create a device attached directly to a hwpt */
564 	if (self->stdev_id) {
565 		test_cmd_mock_domain(self->hwpt_id, NULL, NULL, NULL);
566 	} else {
567 		test_err_mock_domain(ENOENT, self->hwpt_id, NULL, NULL);
568 	}
569 }
570 
571 TEST_F(iommufd_ioas, ioas_area_destroy)
572 {
573 	/* Adding an area does not change ability to destroy */
574 	test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, self->base_iova);
575 	if (self->stdev_id)
576 		EXPECT_ERRNO(EBUSY,
577 			     _test_ioctl_destroy(self->fd, self->ioas_id));
578 	else
579 		test_ioctl_destroy(self->ioas_id);
580 }
581 
582 TEST_F(iommufd_ioas, ioas_area_auto_destroy)
583 {
584 	int i;
585 
586 	/* Can allocate and automatically free an IOAS table with many areas */
587 	for (i = 0; i != 10; i++) {
588 		test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE,
589 					  self->base_iova + i * PAGE_SIZE);
590 	}
591 }
592 
593 TEST_F(iommufd_ioas, get_hw_info)
594 {
595 	struct iommu_test_hw_info buffer_exact;
596 	struct iommu_test_hw_info_buffer_larger {
597 		struct iommu_test_hw_info info;
598 		uint64_t trailing_bytes;
599 	} buffer_larger;
600 	struct iommu_test_hw_info_buffer_smaller {
601 		__u32 flags;
602 	} buffer_smaller;
603 
604 	if (self->device_id) {
605 		/* Provide a zero-size user_buffer */
606 		test_cmd_get_hw_info(self->device_id, NULL, 0);
607 		/* Provide a user_buffer with exact size */
608 		test_cmd_get_hw_info(self->device_id, &buffer_exact, sizeof(buffer_exact));
609 		/*
610 		 * Provide a user_buffer with size larger than the exact size to check
611 		 * that the kernel zeroes the trailing bytes.
612 		 */
613 		test_cmd_get_hw_info(self->device_id, &buffer_larger, sizeof(buffer_larger));
614 		/*
615 		 * Provide a user_buffer with size smaller than the exact size to check
616 		 * that the fields within the size range still get updated.
617 		 */
618 		test_cmd_get_hw_info(self->device_id, &buffer_smaller, sizeof(buffer_smaller));
619 	} else {
620 		test_err_get_hw_info(ENOENT, self->device_id,
621 				     &buffer_exact, sizeof(buffer_exact));
622 		test_err_get_hw_info(ENOENT, self->device_id,
623 				     &buffer_larger, sizeof(buffer_larger));
624 	}
625 }
626 
627 TEST_F(iommufd_ioas, area)
628 {
629 	int i;
630 
631 	/* Unmap fails if nothing is mapped */
632 	for (i = 0; i != 10; i++)
633 		test_err_ioctl_ioas_unmap(ENOENT, i * PAGE_SIZE, PAGE_SIZE);
634 
635 	/* Unmap works */
636 	for (i = 0; i != 10; i++)
637 		test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE,
638 					  self->base_iova + i * PAGE_SIZE);
639 	for (i = 0; i != 10; i++)
640 		test_ioctl_ioas_unmap(self->base_iova + i * PAGE_SIZE,
641 				      PAGE_SIZE);
642 
643 	/* Split fails */
644 	test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE * 2,
645 				  self->base_iova + 16 * PAGE_SIZE);
646 	test_err_ioctl_ioas_unmap(ENOENT, self->base_iova + 16 * PAGE_SIZE,
647 				  PAGE_SIZE);
648 	test_err_ioctl_ioas_unmap(ENOENT, self->base_iova + 17 * PAGE_SIZE,
649 				  PAGE_SIZE);
650 
651 	/* Over map fails */
652 	test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE * 2,
653 				      self->base_iova + 16 * PAGE_SIZE);
654 	test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE,
655 				      self->base_iova + 16 * PAGE_SIZE);
656 	test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE,
657 				      self->base_iova + 17 * PAGE_SIZE);
658 	test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE * 2,
659 				      self->base_iova + 15 * PAGE_SIZE);
660 	test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE * 3,
661 				      self->base_iova + 15 * PAGE_SIZE);
662 
663 	/* unmap all works */
664 	test_ioctl_ioas_unmap(0, UINT64_MAX);
665 
666 	/* Unmap all succeeds on an empty IOAS */
667 	test_ioctl_ioas_unmap(0, UINT64_MAX);
668 }
669 
670 TEST_F(iommufd_ioas, unmap_fully_contained_areas)
671 {
672 	uint64_t unmap_len;
673 	int i;
674 
675 	/* Give no_domain some space to rewind base_iova */
676 	self->base_iova += 4 * PAGE_SIZE;
677 
678 	for (i = 0; i != 4; i++)
679 		test_ioctl_ioas_map_fixed(buffer, 8 * PAGE_SIZE,
680 					  self->base_iova + i * 16 * PAGE_SIZE);
681 
682 	/* Unmapping an area that is not fully contained doesn't work */
683 	test_err_ioctl_ioas_unmap(ENOENT, self->base_iova - 4 * PAGE_SIZE,
684 				  8 * PAGE_SIZE);
685 	test_err_ioctl_ioas_unmap(ENOENT,
686 				  self->base_iova + 3 * 16 * PAGE_SIZE +
687 					  8 * PAGE_SIZE - 4 * PAGE_SIZE,
688 				  8 * PAGE_SIZE);
689 
690 	/* Unmap fully contained areas works */
691 	ASSERT_EQ(0, _test_ioctl_ioas_unmap(self->fd, self->ioas_id,
692 					    self->base_iova - 4 * PAGE_SIZE,
693 					    3 * 16 * PAGE_SIZE + 8 * PAGE_SIZE +
694 						    4 * PAGE_SIZE,
695 					    &unmap_len));
696 	ASSERT_EQ(32 * PAGE_SIZE, unmap_len);
697 }
698 
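/*
 * Automatic IOVA allocation: allocations must come back suitably aligned for
 * their length, must skip reserved ranges, and must land inside any allowed
 * ranges that have been configured.
 */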
699 TEST_F(iommufd_ioas, area_auto_iova)
700 {
701 	struct iommu_test_cmd test_cmd = {
702 		.size = sizeof(test_cmd),
703 		.op = IOMMU_TEST_OP_ADD_RESERVED,
704 		.id = self->ioas_id,
705 		.add_reserved = { .start = PAGE_SIZE * 4,
706 				  .length = PAGE_SIZE * 100 },
707 	};
708 	struct iommu_iova_range ranges[1] = {};
709 	struct iommu_ioas_allow_iovas allow_cmd = {
710 		.size = sizeof(allow_cmd),
711 		.ioas_id = self->ioas_id,
712 		.num_iovas = 1,
713 		.allowed_iovas = (uintptr_t)ranges,
714 	};
715 	__u64 iovas[10];
716 	int i;
717 
718 	/* Simple 4k pages */
719 	for (i = 0; i != 10; i++)
720 		test_ioctl_ioas_map(buffer, PAGE_SIZE, &iovas[i]);
721 	for (i = 0; i != 10; i++)
722 		test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE);
723 
724 	/* Kernel automatically aligns IOVAs properly */
725 	for (i = 0; i != 10; i++) {
726 		size_t length = PAGE_SIZE * (i + 1);
727 
728 		if (self->stdev_id) {
729 			test_ioctl_ioas_map(buffer, length, &iovas[i]);
730 		} else {
731 			test_ioctl_ioas_map((void *)(1UL << 31), length,
732 					    &iovas[i]);
733 		}
734 		EXPECT_EQ(0, iovas[i] % (1UL << (ffs(length) - 1)));
735 	}
736 	for (i = 0; i != 10; i++)
737 		test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1));
738 
739 	/* Avoids a reserved region */
740 	ASSERT_EQ(0,
741 		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
742 			&test_cmd));
743 	for (i = 0; i != 10; i++) {
744 		size_t length = PAGE_SIZE * (i + 1);
745 
746 		test_ioctl_ioas_map(buffer, length, &iovas[i]);
747 		EXPECT_EQ(0, iovas[i] % (1UL << (ffs(length) - 1)));
748 		EXPECT_EQ(false,
749 			  iovas[i] > test_cmd.add_reserved.start &&
750 				  iovas[i] <
751 					  test_cmd.add_reserved.start +
752 						  test_cmd.add_reserved.length);
753 	}
754 	for (i = 0; i != 10; i++)
755 		test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1));
756 
757 	/* Allowed region intersects with a reserved region */
758 	ranges[0].start = PAGE_SIZE;
759 	ranges[0].last = PAGE_SIZE * 600;
760 	EXPECT_ERRNO(EADDRINUSE,
761 		     ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
762 
763 	/* Allocate from an allowed region */
764 	if (self->stdev_id) {
765 		ranges[0].start = MOCK_APERTURE_START + PAGE_SIZE;
766 		ranges[0].last = MOCK_APERTURE_START + PAGE_SIZE * 600 - 1;
767 	} else {
768 		ranges[0].start = PAGE_SIZE * 200;
769 		ranges[0].last = PAGE_SIZE * 600 - 1;
770 	}
771 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
772 	for (i = 0; i != 10; i++) {
773 		size_t length = PAGE_SIZE * (i + 1);
774 
775 		test_ioctl_ioas_map(buffer, length, &iovas[i]);
776 		EXPECT_EQ(0, iovas[i] % (1UL << (ffs(length) - 1)));
777 		EXPECT_EQ(true, iovas[i] >= ranges[0].start);
778 		EXPECT_EQ(true, iovas[i] <= ranges[0].last);
779 		EXPECT_EQ(true, iovas[i] + length > ranges[0].start);
780 		EXPECT_EQ(true, iovas[i] + length <= ranges[0].last + 1);
781 	}
782 	for (i = 0; i != 10; i++)
783 		test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1));
784 }
785 
786 TEST_F(iommufd_ioas, area_allowed)
787 {
788 	struct iommu_test_cmd test_cmd = {
789 		.size = sizeof(test_cmd),
790 		.op = IOMMU_TEST_OP_ADD_RESERVED,
791 		.id = self->ioas_id,
792 		.add_reserved = { .start = PAGE_SIZE * 4,
793 				  .length = PAGE_SIZE * 100 },
794 	};
795 	struct iommu_iova_range ranges[1] = {};
796 	struct iommu_ioas_allow_iovas allow_cmd = {
797 		.size = sizeof(allow_cmd),
798 		.ioas_id = self->ioas_id,
799 		.num_iovas = 1,
800 		.allowed_iovas = (uintptr_t)ranges,
801 	};
802 
803 	/* Reserved intersects an allowed */
804 	allow_cmd.num_iovas = 1;
805 	ranges[0].start = self->base_iova;
806 	ranges[0].last = ranges[0].start + PAGE_SIZE * 600;
807 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
808 	test_cmd.add_reserved.start = ranges[0].start + PAGE_SIZE;
809 	test_cmd.add_reserved.length = PAGE_SIZE;
810 	EXPECT_ERRNO(EADDRINUSE,
811 		     ioctl(self->fd,
812 			   _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
813 			   &test_cmd));
814 	allow_cmd.num_iovas = 0;
815 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
816 
817 	/* Allowed intersects a reserved */
818 	ASSERT_EQ(0,
819 		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
820 			&test_cmd));
821 	allow_cmd.num_iovas = 1;
822 	ranges[0].start = self->base_iova;
823 	ranges[0].last = ranges[0].start + PAGE_SIZE * 600;
824 	EXPECT_ERRNO(EADDRINUSE,
825 		     ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
826 }
827 
828 TEST_F(iommufd_ioas, copy_area)
829 {
830 	struct iommu_ioas_copy copy_cmd = {
831 		.size = sizeof(copy_cmd),
832 		.flags = IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE,
833 		.dst_ioas_id = self->ioas_id,
834 		.src_ioas_id = self->ioas_id,
835 		.length = PAGE_SIZE,
836 	};
837 
838 	test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, self->base_iova);
839 
840 	/* Copy inside a single IOAS */
841 	copy_cmd.src_iova = self->base_iova;
842 	copy_cmd.dst_iova = self->base_iova + PAGE_SIZE;
843 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
844 
845 	/* Copy between IOAS's */
846 	copy_cmd.src_iova = self->base_iova;
847 	copy_cmd.dst_iova = 0;
848 	test_ioctl_ioas_alloc(&copy_cmd.dst_ioas_id);
849 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
850 }
851 
852 TEST_F(iommufd_ioas, iova_ranges)
853 {
854 	struct iommu_test_cmd test_cmd = {
855 		.size = sizeof(test_cmd),
856 		.op = IOMMU_TEST_OP_ADD_RESERVED,
857 		.id = self->ioas_id,
858 		.add_reserved = { .start = PAGE_SIZE, .length = PAGE_SIZE },
859 	};
860 	struct iommu_iova_range *ranges = buffer;
861 	struct iommu_ioas_iova_ranges ranges_cmd = {
862 		.size = sizeof(ranges_cmd),
863 		.ioas_id = self->ioas_id,
864 		.num_iovas = BUFFER_SIZE / sizeof(*ranges),
865 		.allowed_iovas = (uintptr_t)ranges,
866 	};
867 
868 	/* Range can be read */
869 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
870 	EXPECT_EQ(1, ranges_cmd.num_iovas);
871 	if (!self->stdev_id) {
872 		EXPECT_EQ(0, ranges[0].start);
873 		EXPECT_EQ(SIZE_MAX, ranges[0].last);
874 		EXPECT_EQ(1, ranges_cmd.out_iova_alignment);
875 	} else {
876 		EXPECT_EQ(MOCK_APERTURE_START, ranges[0].start);
877 		EXPECT_EQ(MOCK_APERTURE_LAST, ranges[0].last);
878 		EXPECT_EQ(MOCK_PAGE_SIZE, ranges_cmd.out_iova_alignment);
879 	}
880 
881 	/* Buffer too small */
882 	memset(ranges, 0, BUFFER_SIZE);
883 	ranges_cmd.num_iovas = 0;
884 	EXPECT_ERRNO(EMSGSIZE,
885 		     ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
886 	EXPECT_EQ(1, ranges_cmd.num_iovas);
887 	EXPECT_EQ(0, ranges[0].start);
888 	EXPECT_EQ(0, ranges[0].last);
889 
890 	/* 2 ranges */
891 	ASSERT_EQ(0,
892 		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
893 			&test_cmd));
894 	ranges_cmd.num_iovas = BUFFER_SIZE / sizeof(*ranges);
895 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
896 	if (!self->stdev_id) {
897 		EXPECT_EQ(2, ranges_cmd.num_iovas);
898 		EXPECT_EQ(0, ranges[0].start);
899 		EXPECT_EQ(PAGE_SIZE - 1, ranges[0].last);
900 		EXPECT_EQ(PAGE_SIZE * 2, ranges[1].start);
901 		EXPECT_EQ(SIZE_MAX, ranges[1].last);
902 	} else {
903 		EXPECT_EQ(1, ranges_cmd.num_iovas);
904 		EXPECT_EQ(MOCK_APERTURE_START, ranges[0].start);
905 		EXPECT_EQ(MOCK_APERTURE_LAST, ranges[0].last);
906 	}
907 
908 	/* Buffer too small */
909 	memset(ranges, 0, BUFFER_SIZE);
910 	ranges_cmd.num_iovas = 1;
911 	if (!self->stdev_id) {
912 		EXPECT_ERRNO(EMSGSIZE, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES,
913 					     &ranges_cmd));
914 		EXPECT_EQ(2, ranges_cmd.num_iovas);
915 		EXPECT_EQ(0, ranges[0].start);
916 		EXPECT_EQ(PAGE_SIZE - 1, ranges[0].last);
917 	} else {
918 		ASSERT_EQ(0,
919 			  ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
920 		EXPECT_EQ(1, ranges_cmd.num_iovas);
921 		EXPECT_EQ(MOCK_APERTURE_START, ranges[0].start);
922 		EXPECT_EQ(MOCK_APERTURE_LAST, ranges[0].last);
923 	}
924 	EXPECT_EQ(0, ranges[1].start);
925 	EXPECT_EQ(0, ranges[1].last);
926 }
927 
928 TEST_F(iommufd_ioas, access_domain_destory)
929 {
930 	struct iommu_test_cmd access_cmd = {
931 		.size = sizeof(access_cmd),
932 		.op = IOMMU_TEST_OP_ACCESS_PAGES,
933 		.access_pages = { .iova = self->base_iova + PAGE_SIZE,
934 				  .length = PAGE_SIZE},
935 	};
936 	size_t buf_size = 2 * HUGEPAGE_SIZE;
937 	uint8_t *buf;
938 
939 	buf = mmap(0, buf_size, PROT_READ | PROT_WRITE,
940 		   MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB | MAP_POPULATE, -1,
941 		   0);
942 	ASSERT_NE(MAP_FAILED, buf);
943 	test_ioctl_ioas_map_fixed(buf, buf_size, self->base_iova);
944 
945 	test_cmd_create_access(self->ioas_id, &access_cmd.id,
946 			       MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
947 	access_cmd.access_pages.uptr = (uintptr_t)buf + PAGE_SIZE;
948 	ASSERT_EQ(0,
949 		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
950 			&access_cmd));
951 
952 	/* Causes a complicated unpin across a huge page boundary */
953 	if (self->stdev_id)
954 		test_ioctl_destroy(self->stdev_id);
955 
956 	test_cmd_destroy_access_pages(
957 		access_cmd.id, access_cmd.access_pages.out_access_pages_id);
958 	test_cmd_destroy_access(access_cmd.id);
959 	ASSERT_EQ(0, munmap(buf, buf_size));
960 }
961 
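/*
 * Pin pages through an access object for a range of lengths and check that
 * the pins coexist with map/unmap, double users, and a mock domain coming and
 * going while the access is held.
 */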
962 TEST_F(iommufd_ioas, access_pin)
963 {
964 	struct iommu_test_cmd access_cmd = {
965 		.size = sizeof(access_cmd),
966 		.op = IOMMU_TEST_OP_ACCESS_PAGES,
967 		.access_pages = { .iova = MOCK_APERTURE_START,
968 				  .length = BUFFER_SIZE,
969 				  .uptr = (uintptr_t)buffer },
970 	};
971 	struct iommu_test_cmd check_map_cmd = {
972 		.size = sizeof(check_map_cmd),
973 		.op = IOMMU_TEST_OP_MD_CHECK_MAP,
974 		.check_map = { .iova = MOCK_APERTURE_START,
975 			       .length = BUFFER_SIZE,
976 			       .uptr = (uintptr_t)buffer },
977 	};
978 	uint32_t access_pages_id;
979 	unsigned int npages;
980 
981 	test_cmd_create_access(self->ioas_id, &access_cmd.id,
982 			       MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
983 
984 	for (npages = 1; npages < BUFFER_SIZE / PAGE_SIZE; npages++) {
985 		uint32_t mock_stdev_id;
986 		uint32_t mock_hwpt_id;
987 
988 		access_cmd.access_pages.length = npages * PAGE_SIZE;
989 
990 		/* Single map/unmap */
991 		test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE,
992 					  MOCK_APERTURE_START);
993 		ASSERT_EQ(0, ioctl(self->fd,
994 				   _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
995 				   &access_cmd));
996 		test_cmd_destroy_access_pages(
997 			access_cmd.id,
998 			access_cmd.access_pages.out_access_pages_id);
999 
1000 		/* Double user */
1001 		ASSERT_EQ(0, ioctl(self->fd,
1002 				   _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
1003 				   &access_cmd));
1004 		access_pages_id = access_cmd.access_pages.out_access_pages_id;
1005 		ASSERT_EQ(0, ioctl(self->fd,
1006 				   _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
1007 				   &access_cmd));
1008 		test_cmd_destroy_access_pages(
1009 			access_cmd.id,
1010 			access_cmd.access_pages.out_access_pages_id);
1011 		test_cmd_destroy_access_pages(access_cmd.id, access_pages_id);
1012 
1013 		/* Add/remove a domain with a user */
1014 		ASSERT_EQ(0, ioctl(self->fd,
1015 				   _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
1016 				   &access_cmd));
1017 		test_cmd_mock_domain(self->ioas_id, &mock_stdev_id,
1018 				     &mock_hwpt_id, NULL);
1019 		check_map_cmd.id = mock_hwpt_id;
1020 		ASSERT_EQ(0, ioctl(self->fd,
1021 				   _IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_MAP),
1022 				   &check_map_cmd));
1023 
1024 		test_ioctl_destroy(mock_stdev_id);
1025 		test_cmd_destroy_access_pages(
1026 			access_cmd.id,
1027 			access_cmd.access_pages.out_access_pages_id);
1028 
1029 		test_ioctl_ioas_unmap(MOCK_APERTURE_START, BUFFER_SIZE);
1030 	}
1031 	test_cmd_destroy_access(access_cmd.id);
1032 }
1033 
1034 TEST_F(iommufd_ioas, access_pin_unmap)
1035 {
1036 	struct iommu_test_cmd access_pages_cmd = {
1037 		.size = sizeof(access_pages_cmd),
1038 		.op = IOMMU_TEST_OP_ACCESS_PAGES,
1039 		.access_pages = { .iova = MOCK_APERTURE_START,
1040 				  .length = BUFFER_SIZE,
1041 				  .uptr = (uintptr_t)buffer },
1042 	};
1043 
1044 	test_cmd_create_access(self->ioas_id, &access_pages_cmd.id,
1045 			       MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
1046 	test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE, MOCK_APERTURE_START);
1047 	ASSERT_EQ(0,
1048 		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
1049 			&access_pages_cmd));
1050 
1051 	/* Trigger the unmap op */
1052 	test_ioctl_ioas_unmap(MOCK_APERTURE_START, BUFFER_SIZE);
1053 
1054 	/* kernel removed the item for us */
1055 	test_err_destroy_access_pages(
1056 		ENOENT, access_pages_cmd.id,
1057 		access_pages_cmd.access_pages.out_access_pages_id);
1058 }
1059 
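/*
 * Sweep reads and writes through an access object across a page boundary with
 * every small length, comparing against the shared buffer, then do one large
 * multi-page transfer.
 */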
1060 static void check_access_rw(struct __test_metadata *_metadata, int fd,
1061 			    unsigned int access_id, uint64_t iova,
1062 			    unsigned int def_flags)
1063 {
1064 	uint16_t tmp[32];
1065 	struct iommu_test_cmd access_cmd = {
1066 		.size = sizeof(access_cmd),
1067 		.op = IOMMU_TEST_OP_ACCESS_RW,
1068 		.id = access_id,
1069 		.access_rw = { .uptr = (uintptr_t)tmp },
1070 	};
1071 	uint16_t *buffer16 = buffer;
1072 	unsigned int i;
1073 	void *tmp2;
1074 
1075 	for (i = 0; i != BUFFER_SIZE / sizeof(*buffer16); i++)
1076 		buffer16[i] = rand();
1077 
1078 	for (access_cmd.access_rw.iova = iova + PAGE_SIZE - 50;
1079 	     access_cmd.access_rw.iova < iova + PAGE_SIZE + 50;
1080 	     access_cmd.access_rw.iova++) {
1081 		for (access_cmd.access_rw.length = 1;
1082 		     access_cmd.access_rw.length < sizeof(tmp);
1083 		     access_cmd.access_rw.length++) {
1084 			access_cmd.access_rw.flags = def_flags;
1085 			ASSERT_EQ(0, ioctl(fd,
1086 					   _IOMMU_TEST_CMD(
1087 						   IOMMU_TEST_OP_ACCESS_RW),
1088 					   &access_cmd));
1089 			ASSERT_EQ(0,
1090 				  memcmp(buffer + (access_cmd.access_rw.iova -
1091 						   iova),
1092 					 tmp, access_cmd.access_rw.length));
1093 
1094 			for (i = 0; i != ARRAY_SIZE(tmp); i++)
1095 				tmp[i] = rand();
1096 			access_cmd.access_rw.flags = def_flags |
1097 						     MOCK_ACCESS_RW_WRITE;
1098 			ASSERT_EQ(0, ioctl(fd,
1099 					   _IOMMU_TEST_CMD(
1100 						   IOMMU_TEST_OP_ACCESS_RW),
1101 					   &access_cmd));
1102 			ASSERT_EQ(0,
1103 				  memcmp(buffer + (access_cmd.access_rw.iova -
1104 						   iova),
1105 					 tmp, access_cmd.access_rw.length));
1106 		}
1107 	}
1108 
1109 	/* Multi-page test */
1110 	tmp2 = malloc(BUFFER_SIZE);
1111 	ASSERT_NE(NULL, tmp2);
1112 	access_cmd.access_rw.iova = iova;
1113 	access_cmd.access_rw.length = BUFFER_SIZE;
1114 	access_cmd.access_rw.flags = def_flags;
1115 	access_cmd.access_rw.uptr = (uintptr_t)tmp2;
1116 	ASSERT_EQ(0, ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
1117 			   &access_cmd));
1118 	ASSERT_EQ(0, memcmp(buffer, tmp2, access_cmd.access_rw.length));
1119 	free(tmp2);
1120 }
1121 
1122 TEST_F(iommufd_ioas, access_rw)
1123 {
1124 	__u32 access_id;
1125 	__u64 iova;
1126 
1127 	test_cmd_create_access(self->ioas_id, &access_id, 0);
1128 	test_ioctl_ioas_map(buffer, BUFFER_SIZE, &iova);
1129 	check_access_rw(_metadata, self->fd, access_id, iova, 0);
1130 	check_access_rw(_metadata, self->fd, access_id, iova,
1131 			MOCK_ACCESS_RW_SLOW_PATH);
1132 	test_ioctl_ioas_unmap(iova, BUFFER_SIZE);
1133 	test_cmd_destroy_access(access_id);
1134 }
1135 
1136 TEST_F(iommufd_ioas, access_rw_unaligned)
1137 {
1138 	__u32 access_id;
1139 	__u64 iova;
1140 
1141 	test_cmd_create_access(self->ioas_id, &access_id, 0);
1142 
1143 	/* Unaligned pages */
1144 	iova = self->base_iova + MOCK_PAGE_SIZE;
1145 	test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE, iova);
1146 	check_access_rw(_metadata, self->fd, access_id, iova, 0);
1147 	test_ioctl_ioas_unmap(iova, BUFFER_SIZE);
1148 	test_cmd_destroy_access(access_id);
1149 }
1150 
1151 TEST_F(iommufd_ioas, fork_gone)
1152 {
1153 	__u32 access_id;
1154 	pid_t child;
1155 
1156 	test_cmd_create_access(self->ioas_id, &access_id, 0);
1157 
1158 	/* Create a mapping with a different mm */
1159 	child = fork();
1160 	if (!child) {
1161 		test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE,
1162 					  MOCK_APERTURE_START);
1163 		exit(0);
1164 	}
1165 	ASSERT_NE(-1, child);
1166 	ASSERT_EQ(child, waitpid(child, NULL, 0));
1167 
1168 	if (self->stdev_id) {
1169 		/*
1170 		 * If a domain already existed then everything was pinned within
1171 		 * the fork, so this copies from one domain to another.
1172 		 */
1173 		test_cmd_mock_domain(self->ioas_id, NULL, NULL, NULL);
1174 		check_access_rw(_metadata, self->fd, access_id,
1175 				MOCK_APERTURE_START, 0);
1176 
1177 	} else {
1178 		/*
1179 		 * Otherwise we need to actually pin pages which can't happen
1180 		 * since the fork is gone.
1181 		 */
1182 		test_err_mock_domain(EFAULT, self->ioas_id, NULL, NULL);
1183 	}
1184 
1185 	test_cmd_destroy_access(access_id);
1186 }
1187 
1188 TEST_F(iommufd_ioas, fork_present)
1189 {
1190 	__u32 access_id;
1191 	int pipefds[2];
1192 	uint64_t tmp;
1193 	pid_t child;
1194 	int efd;
1195 
1196 	test_cmd_create_access(self->ioas_id, &access_id, 0);
1197 
1198 	ASSERT_EQ(0, pipe2(pipefds, O_CLOEXEC));
1199 	efd = eventfd(0, EFD_CLOEXEC);
1200 	ASSERT_NE(-1, efd);
1201 
1202 	/* Create a mapping with a different mm */
1203 	child = fork();
1204 	if (!child) {
1205 		__u64 iova;
1206 		uint64_t one = 1;
1207 
1208 		close(pipefds[1]);
1209 		test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE,
1210 					  MOCK_APERTURE_START);
1211 		if (write(efd, &one, sizeof(one)) != sizeof(one))
1212 			exit(100);
1213 		if (read(pipefds[0], &iova, 1) != 1)
1214 			exit(100);
1215 		exit(0);
1216 	}
1217 	close(pipefds[0]);
1218 	ASSERT_NE(-1, child);
1219 	ASSERT_EQ(8, read(efd, &tmp, sizeof(tmp)));
1220 
1221 	/* Read pages from the remote process */
1222 	test_cmd_mock_domain(self->ioas_id, NULL, NULL, NULL);
1223 	check_access_rw(_metadata, self->fd, access_id, MOCK_APERTURE_START, 0);
1224 
1225 	ASSERT_EQ(0, close(pipefds[1]));
1226 	ASSERT_EQ(child, waitpid(child, NULL, 0));
1227 
1228 	test_cmd_destroy_access(access_id);
1229 }
1230 
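/*
 * IOMMU_OPTION_HUGE_PAGES is a per-IOAS option: it defaults to 1, only 0 or 1
 * may be set, and GET reflects the last value written.
 */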
1231 TEST_F(iommufd_ioas, ioas_option_huge_pages)
1232 {
1233 	struct iommu_option cmd = {
1234 		.size = sizeof(cmd),
1235 		.option_id = IOMMU_OPTION_HUGE_PAGES,
1236 		.op = IOMMU_OPTION_OP_GET,
1237 		.val64 = 3,
1238 		.object_id = self->ioas_id,
1239 	};
1240 
1241 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1242 	ASSERT_EQ(1, cmd.val64);
1243 
1244 	cmd.op = IOMMU_OPTION_OP_SET;
1245 	cmd.val64 = 0;
1246 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1247 
1248 	cmd.op = IOMMU_OPTION_OP_GET;
1249 	cmd.val64 = 3;
1250 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1251 	ASSERT_EQ(0, cmd.val64);
1252 
1253 	cmd.op = IOMMU_OPTION_OP_SET;
1254 	cmd.val64 = 2;
1255 	EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_OPTION, &cmd));
1256 
1257 	cmd.op = IOMMU_OPTION_OP_SET;
1258 	cmd.val64 = 1;
1259 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1260 }
1261 
1262 TEST_F(iommufd_ioas, ioas_iova_alloc)
1263 {
1264 	unsigned int length;
1265 	__u64 iova;
1266 
1267 	for (length = 1; length != PAGE_SIZE * 2; length++) {
1268 		if (variant->mock_domains && (length % MOCK_PAGE_SIZE)) {
1269 			test_err_ioctl_ioas_map(EINVAL, buffer, length, &iova);
1270 		} else {
1271 			test_ioctl_ioas_map(buffer, length, &iova);
1272 			test_ioctl_ioas_unmap(iova, length);
1273 		}
1274 	}
1275 }
1276 
1277 TEST_F(iommufd_ioas, ioas_align_change)
1278 {
1279 	struct iommu_option cmd = {
1280 		.size = sizeof(cmd),
1281 		.option_id = IOMMU_OPTION_HUGE_PAGES,
1282 		.op = IOMMU_OPTION_OP_SET,
1283 		.object_id = self->ioas_id,
1284 		/* 0 means everything must be aligned to PAGE_SIZE */
1285 		.val64 = 0,
1286 	};
1287 
1288 	/*
1289 	 * We cannot upgrade the alignment using OPTION_HUGE_PAGES when a domain
1290 	 * and map are present.
1291 	 */
1292 	if (variant->mock_domains)
1293 		return;
1294 
1295 	/*
1296 	 * We can upgrade to PAGE_SIZE alignment when things are aligned right
1297 	 */
1298 	test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, MOCK_APERTURE_START);
1299 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1300 
1301 	/* Misalignment is rejected at map time */
1302 	test_err_ioctl_ioas_map_fixed(EINVAL, buffer + MOCK_PAGE_SIZE,
1303 				      PAGE_SIZE,
1304 				      MOCK_APERTURE_START + PAGE_SIZE);
1305 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1306 
1307 	/* Reduce alignment */
1308 	cmd.val64 = 1;
1309 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1310 
1311 	/* Confirm misalignment is rejected during alignment upgrade */
1312 	test_ioctl_ioas_map_fixed(buffer + MOCK_PAGE_SIZE, PAGE_SIZE,
1313 				  MOCK_APERTURE_START + PAGE_SIZE);
1314 	cmd.val64 = 0;
1315 	EXPECT_ERRNO(EADDRINUSE, ioctl(self->fd, IOMMU_OPTION, &cmd));
1316 
1317 	test_ioctl_ioas_unmap(MOCK_APERTURE_START + PAGE_SIZE, PAGE_SIZE);
1318 	test_ioctl_ioas_unmap(MOCK_APERTURE_START, PAGE_SIZE);
1319 }
1320 
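/*
 * Sweep IOMMU_IOAS_COPY source addresses in 511 byte steps across the mapped
 * window; copies that fall even partly outside the mapped range must fail
 * with ENOENT, while in-range copies must succeed.
 */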
1321 TEST_F(iommufd_ioas, copy_sweep)
1322 {
1323 	struct iommu_ioas_copy copy_cmd = {
1324 		.size = sizeof(copy_cmd),
1325 		.flags = IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE,
1326 		.src_ioas_id = self->ioas_id,
1327 		.dst_iova = MOCK_APERTURE_START,
1328 		.length = MOCK_PAGE_SIZE,
1329 	};
1330 	unsigned int dst_ioas_id;
1331 	uint64_t last_iova;
1332 	uint64_t iova;
1333 
1334 	test_ioctl_ioas_alloc(&dst_ioas_id);
1335 	copy_cmd.dst_ioas_id = dst_ioas_id;
1336 
1337 	if (variant->mock_domains)
1338 		last_iova = MOCK_APERTURE_START + BUFFER_SIZE - 1;
1339 	else
1340 		last_iova = MOCK_APERTURE_START + BUFFER_SIZE - 2;
1341 
1342 	test_ioctl_ioas_map_fixed(buffer, last_iova - MOCK_APERTURE_START + 1,
1343 				  MOCK_APERTURE_START);
1344 
1345 	for (iova = MOCK_APERTURE_START - PAGE_SIZE; iova <= last_iova;
1346 	     iova += 511) {
1347 		copy_cmd.src_iova = iova;
1348 		if (iova < MOCK_APERTURE_START ||
1349 		    iova + copy_cmd.length - 1 > last_iova) {
1350 			EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_IOAS_COPY,
1351 						   &copy_cmd));
1352 		} else {
1353 			ASSERT_EQ(0,
1354 				  ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
1355 			test_ioctl_ioas_unmap_id(dst_ioas_id, copy_cmd.dst_iova,
1356 						 copy_cmd.length);
1357 		}
1358 	}
1359 
1360 	test_ioctl_destroy(dst_ioas_id);
1361 }
1362 
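/*
 * Fixture with one or two mock domains attached to a single IOAS. Variants
 * select the number of domains and whether test buffers are mmap'd with
 * MAP_HUGETLB.
 */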
1363 FIXTURE(iommufd_mock_domain)
1364 {
1365 	int fd;
1366 	uint32_t ioas_id;
1367 	uint32_t hwpt_id;
1368 	uint32_t hwpt_ids[2];
1369 	uint32_t stdev_ids[2];
1370 	uint32_t idev_ids[2];
1371 	int mmap_flags;
1372 	size_t mmap_buf_size;
1373 };
1374 
1375 FIXTURE_VARIANT(iommufd_mock_domain)
1376 {
1377 	unsigned int mock_domains;
1378 	bool hugepages;
1379 };
1380 
1381 FIXTURE_SETUP(iommufd_mock_domain)
1382 {
1383 	unsigned int i;
1384 
1385 	self->fd = open("/dev/iommu", O_RDWR);
1386 	ASSERT_NE(-1, self->fd);
1387 	test_ioctl_ioas_alloc(&self->ioas_id);
1388 
1389 	ASSERT_GE(ARRAY_SIZE(self->hwpt_ids), variant->mock_domains);
1390 
1391 	for (i = 0; i != variant->mock_domains; i++)
1392 		test_cmd_mock_domain(self->ioas_id, &self->stdev_ids[i],
1393 				     &self->hwpt_ids[i], &self->idev_ids[i]);
1394 	self->hwpt_id = self->hwpt_ids[0];
1395 
1396 	self->mmap_flags = MAP_SHARED | MAP_ANONYMOUS;
1397 	self->mmap_buf_size = PAGE_SIZE * 8;
1398 	if (variant->hugepages) {
1399 		/*
1400 		 * MAP_POPULATE will cause the kernel to fail mmap if THPs are
1401 		 * not available.
1402 		 */
1403 		self->mmap_flags |= MAP_HUGETLB | MAP_POPULATE;
1404 		self->mmap_buf_size = HUGEPAGE_SIZE * 2;
1405 	}
1406 }
1407 
1408 FIXTURE_TEARDOWN(iommufd_mock_domain)
1409 {
1410 	teardown_iommufd(self->fd, _metadata);
1411 }
1412 
1413 FIXTURE_VARIANT_ADD(iommufd_mock_domain, one_domain)
1414 {
1415 	.mock_domains = 1,
1416 	.hugepages = false,
1417 };
1418 
1419 FIXTURE_VARIANT_ADD(iommufd_mock_domain, two_domains)
1420 {
1421 	.mock_domains = 2,
1422 	.hugepages = false,
1423 };
1424 
1425 FIXTURE_VARIANT_ADD(iommufd_mock_domain, one_domain_hugepage)
1426 {
1427 	.mock_domains = 1,
1428 	.hugepages = true,
1429 };
1430 
1431 FIXTURE_VARIANT_ADD(iommufd_mock_domain, two_domains_hugepage)
1432 {
1433 	.mock_domains = 2,
1434 	.hugepages = true,
1435 };
1436 
1437 /* Have the kernel check that the user pages made it to the iommu_domain */
1438 #define check_mock_iova(_ptr, _iova, _length)                                \
1439 	({                                                                   \
1440 		struct iommu_test_cmd check_map_cmd = {                      \
1441 			.size = sizeof(check_map_cmd),                       \
1442 			.op = IOMMU_TEST_OP_MD_CHECK_MAP,                    \
1443 			.id = self->hwpt_id,                                 \
1444 			.check_map = { .iova = _iova,                        \
1445 				       .length = _length,                    \
1446 				       .uptr = (uintptr_t)(_ptr) },          \
1447 		};                                                           \
1448 		ASSERT_EQ(0,                                                 \
1449 			  ioctl(self->fd,                                    \
1450 				_IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_MAP), \
1451 				&check_map_cmd));                            \
1452 		if (self->hwpt_ids[1]) {                                     \
1453 			check_map_cmd.id = self->hwpt_ids[1];                \
1454 			ASSERT_EQ(0,                                         \
1455 				  ioctl(self->fd,                            \
1456 					_IOMMU_TEST_CMD(                     \
1457 						IOMMU_TEST_OP_MD_CHECK_MAP), \
1458 					&check_map_cmd));                    \
1459 		}                                                            \
1460 	})
1461 
1462 TEST_F(iommufd_mock_domain, basic)
1463 {
1464 	size_t buf_size = self->mmap_buf_size;
1465 	uint8_t *buf;
1466 	__u64 iova;
1467 
1468 	/* Simple one page map */
1469 	test_ioctl_ioas_map(buffer, PAGE_SIZE, &iova);
1470 	check_mock_iova(buffer, iova, PAGE_SIZE);
1471 
1472 	buf = mmap(0, buf_size, PROT_READ | PROT_WRITE, self->mmap_flags, -1,
1473 		   0);
1474 	ASSERT_NE(MAP_FAILED, buf);
1475 
1476 	/* EFAULT half way through mapping */
1477 	ASSERT_EQ(0, munmap(buf + buf_size / 2, buf_size / 2));
1478 	test_err_ioctl_ioas_map(EFAULT, buf, buf_size, &iova);
1479 
1480 	/* EFAULT on first page */
1481 	ASSERT_EQ(0, munmap(buf, buf_size / 2));
1482 	test_err_ioctl_ioas_map(EFAULT, buf, buf_size, &iova);
1483 }
1484 
1485 TEST_F(iommufd_mock_domain, ro_unshare)
1486 {
1487 	uint8_t *buf;
1488 	__u64 iova;
1489 	int fd;
1490 
1491 	fd = open("/proc/self/exe", O_RDONLY);
1492 	ASSERT_NE(-1, fd);
1493 
1494 	buf = mmap(0, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
1495 	ASSERT_NE(MAP_FAILED, buf);
1496 	close(fd);
1497 
1498 	/*
1499 	 * There have been lots of changes to the "unshare" mechanism in
1500 	 * get_user_pages(); make sure it works right. The write to the page
1501 	 * after we map it for reading should not change the assigned PFN.
1502 	 */
1503 	ASSERT_EQ(0,
1504 		  _test_ioctl_ioas_map(self->fd, self->ioas_id, buf, PAGE_SIZE,
1505 				       &iova, IOMMU_IOAS_MAP_READABLE));
1506 	check_mock_iova(buf, iova, PAGE_SIZE);
1507 	memset(buf, 1, PAGE_SIZE);
1508 	check_mock_iova(buf, iova, PAGE_SIZE);
1509 	ASSERT_EQ(0, munmap(buf, PAGE_SIZE));
1510 }
1511 
1512 TEST_F(iommufd_mock_domain, all_aligns)
1513 {
1514 	size_t test_step = variant->hugepages ? (self->mmap_buf_size / 16) :
1515 						MOCK_PAGE_SIZE;
1516 	size_t buf_size = self->mmap_buf_size;
1517 	unsigned int start;
1518 	unsigned int end;
1519 	uint8_t *buf;
1520 
1521 	buf = mmap(0, buf_size, PROT_READ | PROT_WRITE, self->mmap_flags, -1,
1522 		   0);
1523 	ASSERT_NE(MAP_FAILED, buf);
1524 	check_refs(buf, buf_size, 0);
1525 
1526 	/*
1527 	 * Map every combination of page size and alignment within a big region,
1528 	 * less for hugepage case as it takes so long to finish.
1529 	 */
1530 	for (start = 0; start < buf_size; start += test_step) {
1531 		if (variant->hugepages)
1532 			end = buf_size;
1533 		else
1534 			end = start + MOCK_PAGE_SIZE;
1535 		for (; end < buf_size; end += MOCK_PAGE_SIZE) {
1536 			size_t length = end - start;
1537 			__u64 iova;
1538 
1539 			test_ioctl_ioas_map(buf + start, length, &iova);
1540 			check_mock_iova(buf + start, iova, length);
1541 			check_refs(buf + start / PAGE_SIZE * PAGE_SIZE,
1542 				   end / PAGE_SIZE * PAGE_SIZE -
1543 					   start / PAGE_SIZE * PAGE_SIZE,
1544 				   1);
1545 
1546 			test_ioctl_ioas_unmap(iova, length);
1547 		}
1548 	}
1549 	check_refs(buf, buf_size, 0);
1550 	ASSERT_EQ(0, munmap(buf, buf_size));
1551 }
1552 
1553 TEST_F(iommufd_mock_domain, all_aligns_copy)
1554 {
1555 	size_t test_step = variant->hugepages ? self->mmap_buf_size / 16 :
1556 						MOCK_PAGE_SIZE;
1557 	size_t buf_size = self->mmap_buf_size;
1558 	unsigned int start;
1559 	unsigned int end;
1560 	uint8_t *buf;
1561 
1562 	buf = mmap(0, buf_size, PROT_READ | PROT_WRITE, self->mmap_flags, -1,
1563 		   0);
1564 	ASSERT_NE(MAP_FAILED, buf);
1565 	check_refs(buf, buf_size, 0);
1566 
1567 	/*
1568 	 * Map every combination of page size and alignment within a big region,
1569 	 * less for hugepage case as it takes so long to finish.
1570 	 */
1571 	for (start = 0; start < buf_size; start += test_step) {
1572 		if (variant->hugepages)
1573 			end = buf_size;
1574 		else
1575 			end = start + MOCK_PAGE_SIZE;
1576 		for (; end < buf_size; end += MOCK_PAGE_SIZE) {
1577 			size_t length = end - start;
1578 			unsigned int old_id;
1579 			uint32_t mock_stdev_id;
1580 			__u64 iova;
1581 
1582 			test_ioctl_ioas_map(buf + start, length, &iova);
1583 
1584 			/* Add and destroy a domain while the area exists */
1585 			old_id = self->hwpt_ids[1];
1586 			test_cmd_mock_domain(self->ioas_id, &mock_stdev_id,
1587 					     &self->hwpt_ids[1], NULL);
1588 
1589 			check_mock_iova(buf + start, iova, length);
1590 			check_refs(buf + start / PAGE_SIZE * PAGE_SIZE,
1591 				   end / PAGE_SIZE * PAGE_SIZE -
1592 					   start / PAGE_SIZE * PAGE_SIZE,
1593 				   1);
1594 
1595 			test_ioctl_destroy(mock_stdev_id);
1596 			self->hwpt_ids[1] = old_id;
1597 
1598 			test_ioctl_ioas_unmap(iova, length);
1599 		}
1600 	}
1601 	check_refs(buf, buf_size, 0);
1602 	ASSERT_EQ(0, munmap(buf, buf_size));
1603 }
1604 
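/*
 * Pin pages with an access object in a domain-less IOAS, IOMMU_IOAS_COPY them
 * into the domain-backed IOAS, then replace the access's IOAS with a fresh
 * one, destroy the old IOAS, and run the same copy again from the new IOAS.
 */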
1605 TEST_F(iommufd_mock_domain, user_copy)
1606 {
1607 	struct iommu_test_cmd access_cmd = {
1608 		.size = sizeof(access_cmd),
1609 		.op = IOMMU_TEST_OP_ACCESS_PAGES,
1610 		.access_pages = { .length = BUFFER_SIZE,
1611 				  .uptr = (uintptr_t)buffer },
1612 	};
1613 	struct iommu_ioas_copy copy_cmd = {
1614 		.size = sizeof(copy_cmd),
1615 		.flags = IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE,
1616 		.dst_ioas_id = self->ioas_id,
1617 		.dst_iova = MOCK_APERTURE_START,
1618 		.length = BUFFER_SIZE,
1619 	};
1620 	struct iommu_ioas_unmap unmap_cmd = {
1621 		.size = sizeof(unmap_cmd),
1622 		.ioas_id = self->ioas_id,
1623 		.iova = MOCK_APERTURE_START,
1624 		.length = BUFFER_SIZE,
1625 	};
1626 	unsigned int new_ioas_id, ioas_id;
1627 
1628 	/* Pin the pages in an IOAS with no domains then copy to an IOAS with domains */
1629 	test_ioctl_ioas_alloc(&ioas_id);
1630 	test_ioctl_ioas_map_id(ioas_id, buffer, BUFFER_SIZE,
1631 			       &copy_cmd.src_iova);
1632 
1633 	test_cmd_create_access(ioas_id, &access_cmd.id,
1634 			       MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
1635 
1636 	access_cmd.access_pages.iova = copy_cmd.src_iova;
1637 	ASSERT_EQ(0,
1638 		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
1639 			&access_cmd));
1640 	copy_cmd.src_ioas_id = ioas_id;
1641 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
1642 	check_mock_iova(buffer, MOCK_APERTURE_START, BUFFER_SIZE);
1643 
1644 	/* Now replace the ioas with a new one */
1645 	test_ioctl_ioas_alloc(&new_ioas_id);
1646 	test_ioctl_ioas_map_id(new_ioas_id, buffer, BUFFER_SIZE,
1647 			       &copy_cmd.src_iova);
1648 	test_cmd_access_replace_ioas(access_cmd.id, new_ioas_id);
1649 
1650 	/* Destroy the old ioas and cleanup copied mapping */
1651 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_UNMAP, &unmap_cmd));
1652 	test_ioctl_destroy(ioas_id);
1653 
1654 	/* Then run the same test again with the new ioas */
1655 	access_cmd.access_pages.iova = copy_cmd.src_iova;
1656 	ASSERT_EQ(0,
1657 		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
1658 			&access_cmd));
1659 	copy_cmd.src_ioas_id = new_ioas_id;
1660 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
1661 	check_mock_iova(buffer, MOCK_APERTURE_START, BUFFER_SIZE);
1662 
1663 	test_cmd_destroy_access_pages(
1664 		access_cmd.id, access_cmd.access_pages.out_access_pages_id);
1665 	test_cmd_destroy_access(access_cmd.id);
1666 
1667 	test_ioctl_destroy(new_ioas_id);
1668 }
1669 
1670 TEST_F(iommufd_mock_domain, replace)
1671 {
1672 	uint32_t ioas_id;
1673 
1674 	test_ioctl_ioas_alloc(&ioas_id);
1675 
1676 	test_cmd_mock_domain_replace(self->stdev_ids[0], ioas_id);
1677 
1678 	/*
1679 	 * Replacing the IOAS causes the prior HWPT to be deallocated, thus we
1680 	 * should get ENOENT when we try to use it.
1681 	 */
1682 	if (variant->mock_domains == 1)
1683 		test_err_mock_domain_replace(ENOENT, self->stdev_ids[0],
1684 					     self->hwpt_ids[0]);
1685 
1686 	test_cmd_mock_domain_replace(self->stdev_ids[0], ioas_id);
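	/* With two domains, replace to the other HWPT, to it again, then back */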
1687 	if (variant->mock_domains >= 2) {
1688 		test_cmd_mock_domain_replace(self->stdev_ids[0],
1689 					     self->hwpt_ids[1]);
1690 		test_cmd_mock_domain_replace(self->stdev_ids[0],
1691 					     self->hwpt_ids[1]);
1692 		test_cmd_mock_domain_replace(self->stdev_ids[0],
1693 					     self->hwpt_ids[0]);
1694 	}
1695 
1696 	test_cmd_mock_domain_replace(self->stdev_ids[0], self->ioas_id);
1697 	test_ioctl_destroy(ioas_id);
1698 }
1699 
1700 TEST_F(iommufd_mock_domain, alloc_hwpt)
1701 {
1702 	int i;
1703 
1704 	for (i = 0; i != variant->mock_domains; i++) {
1705 		uint32_t hwpt_id[2];
1706 		uint32_t stddev_id;
1707 
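		/* Unsupported allocation flags must be rejected */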
1708 		test_err_hwpt_alloc(EOPNOTSUPP,
1709 				    self->idev_ids[i], self->ioas_id,
1710 				    ~IOMMU_HWPT_ALLOC_NEST_PARENT, &hwpt_id[0]);
1711 		test_cmd_hwpt_alloc(self->idev_ids[i], self->ioas_id,
1712 				    0, &hwpt_id[0]);
1713 		test_cmd_hwpt_alloc(self->idev_ids[i], self->ioas_id,
1714 				    IOMMU_HWPT_ALLOC_NEST_PARENT, &hwpt_id[1]);
1715 
1716 		/* Do a hw_pagetable rotation test */
1717 		test_cmd_mock_domain_replace(self->stdev_ids[i], hwpt_id[0]);
1718 		EXPECT_ERRNO(EBUSY, _test_ioctl_destroy(self->fd, hwpt_id[0]));
1719 		test_cmd_mock_domain_replace(self->stdev_ids[i], hwpt_id[1]);
1720 		EXPECT_ERRNO(EBUSY, _test_ioctl_destroy(self->fd, hwpt_id[1]));
1721 		test_cmd_mock_domain_replace(self->stdev_ids[i], self->ioas_id);
1722 		test_ioctl_destroy(hwpt_id[1]);
1723 
1724 		test_cmd_mock_domain(hwpt_id[0], &stddev_id, NULL, NULL);
1725 		test_ioctl_destroy(stddev_id);
1726 		test_ioctl_destroy(hwpt_id[0]);
1727 	}
1728 }
1729 
1730 FIXTURE(iommufd_dirty_tracking)
1731 {
1732 	int fd;
1733 	uint32_t ioas_id;
1734 	uint32_t hwpt_id;
1735 	uint32_t stdev_id;
1736 	uint32_t idev_id;
1737 	unsigned long page_size;
1738 	unsigned long bitmap_size;
1739 	void *bitmap;
1740 	void *buffer;
1741 };
1742 
1743 FIXTURE_VARIANT(iommufd_dirty_tracking)
1744 {
1745 	unsigned long buffer_size;
1746 	bool hugepages;
1747 };
1748 
1749 FIXTURE_SETUP(iommufd_dirty_tracking)
1750 {
1751 	size_t mmap_buffer_size;
1752 	unsigned long size;
1753 	int mmap_flags;
1754 	void *vrc;
1755 	int rc;
1756 
1757 	if (variant->buffer_size < MOCK_PAGE_SIZE) {
1758 		SKIP(return,
1759 		     "Skipping buffer_size=%lu, less than MOCK_PAGE_SIZE=%lu",
1760 		     variant->buffer_size, MOCK_PAGE_SIZE);
1761 	}
1762 
1763 	self->fd = open("/dev/iommu", O_RDWR);
1764 	ASSERT_NE(-1, self->fd);
1765 
1766 	mmap_flags = MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED;
1767 	mmap_buffer_size = variant->buffer_size;
1768 	if (variant->hugepages) {
1769 		/*
1770 		 * MAP_POPULATE will cause the kernel to fail mmap if hugepages are
1771 		 * not available.
1772 		 */
1773 		mmap_flags |= MAP_HUGETLB | MAP_POPULATE;
1774 
1775 		/*
1776 		 * Allocation must be aligned to the HUGEPAGE_SIZE, because the
1777 		 * following mmap() will automatically align the length to be a
1778 		 * multiple of the underlying huge page size. Failing to do the
1779 		 * same at this allocation will result in a memory overwrite by
1780 		 * the mmap().
1781 		 */
1782 		if (mmap_buffer_size < HUGEPAGE_SIZE)
1783 			mmap_buffer_size = HUGEPAGE_SIZE;
1784 	}
1785 
1786 	rc = posix_memalign(&self->buffer, HUGEPAGE_SIZE, mmap_buffer_size);
1787 	if (rc || !self->buffer) {
1788 		SKIP(return, "Skipping buffer_size=%lu due to errno=%d",
1789 			   mmap_buffer_size, rc);
1790 	}
1791 	assert((uintptr_t)self->buffer % HUGEPAGE_SIZE == 0);
1792 	vrc = mmap(self->buffer, mmap_buffer_size, PROT_READ | PROT_WRITE,
1793 		   mmap_flags, -1, 0);
1794 	assert(vrc == self->buffer);
1795 
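	/* One dirty bit is tracked per MOCK_PAGE_SIZE page of the buffer */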
1796 	self->page_size = MOCK_PAGE_SIZE;
1797 	self->bitmap_size = variant->buffer_size / self->page_size;
1798 
1799 	/* Provision with an extra (PAGE_SIZE) for the unaligned case */
1800 	size = DIV_ROUND_UP(self->bitmap_size, BITS_PER_BYTE);
1801 	rc = posix_memalign(&self->bitmap, PAGE_SIZE, size + PAGE_SIZE);
1802 	assert(!rc);
1803 	assert(self->bitmap);
1804 	assert((uintptr_t)self->bitmap % PAGE_SIZE == 0);
1805 
1806 	test_ioctl_ioas_alloc(&self->ioas_id);
1807 	/* Enable 1M mock IOMMU hugepages */
1808 	if (variant->hugepages) {
1809 		test_cmd_mock_domain_flags(self->ioas_id,
1810 					   MOCK_FLAGS_DEVICE_HUGE_IOVA,
1811 					   &self->stdev_id, &self->hwpt_id,
1812 					   &self->idev_id);
1813 	} else {
1814 		test_cmd_mock_domain(self->ioas_id, &self->stdev_id,
1815 				     &self->hwpt_id, &self->idev_id);
1816 	}
1817 }
1818 
1819 FIXTURE_TEARDOWN(iommufd_dirty_tracking)
1820 {
1821 	free(self->buffer);
1822 	free(self->bitmap);
1823 	teardown_iommufd(self->fd, _metadata);
1824 }
1825 
1826 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty8k)
1827 {
1828 	/* half of a u8 index bitmap */
1829 	.buffer_size = 8UL * 1024UL,
1830 };
1831 
1832 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty16k)
1833 {
1834 	/* one u8 index bitmap */
1835 	.buffer_size = 16UL * 1024UL,
1836 };
1837 
1838 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty64k)
1839 {
1840 	/* one u32 index bitmap */
1841 	.buffer_size = 64UL * 1024UL,
1842 };
1843 
1844 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128k)
1845 {
1846 	/* one u64 index bitmap */
1847 	.buffer_size = 128UL * 1024UL,
1848 };
1849 
1850 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty320k)
1851 {
1852 	/* two full u64 bitmap words plus a trailing partial word */
1853 	.buffer_size = 320UL * 1024UL,
1854 };
1855 
1856 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty64M)
1857 {
1858 	/* 4K bitmap (64M IOVA range) */
1859 	.buffer_size = 64UL * 1024UL * 1024UL,
1860 };
1861 
1862 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty64M_huge)
1863 {
1864 	/* 4K bitmap (64M IOVA range) */
1865 	.buffer_size = 64UL * 1024UL * 1024UL,
1866 	.hugepages = true,
1867 };
1868 
1869 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128M)
1870 {
1871 	/* 8K bitmap (128M IOVA range) */
1872 	.buffer_size = 128UL * 1024UL * 1024UL,
1873 };
1874 
1875 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128M_huge)
1876 {
1877 	/* 8K bitmap (128M IOVA range) */
1878 	.buffer_size = 128UL * 1024UL * 1024UL,
1879 	.hugepages = true,
1880 };
1881 
1882 TEST_F(iommufd_dirty_tracking, enforce_dirty)
1883 {
1884 	uint32_t ioas_id, stddev_id, idev_id;
1885 	uint32_t hwpt_id, _hwpt_id;
1886 	uint32_t dev_flags;
1887 
1888 	/* Regular case */
1889 	dev_flags = MOCK_FLAGS_DEVICE_NO_DIRTY;
1890 	test_cmd_hwpt_alloc(self->idev_id, self->ioas_id,
1891 			    IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
1892 	test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL);
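	/* A device without dirty support cannot attach to a dirty tracking HWPT */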
1893 	test_err_mock_domain_flags(EINVAL, hwpt_id, dev_flags, &stddev_id,
1894 				   NULL);
1895 	test_ioctl_destroy(stddev_id);
1896 	test_ioctl_destroy(hwpt_id);
1897 
1898 	/* IOMMU device does not support dirty tracking */
1899 	test_ioctl_ioas_alloc(&ioas_id);
1900 	test_cmd_mock_domain_flags(ioas_id, dev_flags, &stddev_id, &_hwpt_id,
1901 				   &idev_id);
1902 	test_err_hwpt_alloc(EOPNOTSUPP, idev_id, ioas_id,
1903 			    IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
1904 	test_ioctl_destroy(stddev_id);
1905 }
1906 
1907 TEST_F(iommufd_dirty_tracking, set_dirty_tracking)
1908 {
1909 	uint32_t stddev_id;
1910 	uint32_t hwpt_id;
1911 
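	/* Dirty tracking can be enabled and disabled again on a dirty capable HWPT */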
1912 	test_cmd_hwpt_alloc(self->idev_id, self->ioas_id,
1913 			    IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
1914 	test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL);
1915 	test_cmd_set_dirty_tracking(hwpt_id, true);
1916 	test_cmd_set_dirty_tracking(hwpt_id, false);
1917 
1918 	test_ioctl_destroy(stddev_id);
1919 	test_ioctl_destroy(hwpt_id);
1920 }
1921 
1922 TEST_F(iommufd_dirty_tracking, device_dirty_capability)
1923 {
1924 	uint32_t caps = 0;
1925 	uint32_t stddev_id;
1926 	uint32_t hwpt_id;
1927 
1928 	test_cmd_hwpt_alloc(self->idev_id, self->ioas_id, 0, &hwpt_id);
1929 	test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL);
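	/* The mock device is expected to report the dirty tracking capability */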
1930 	test_cmd_get_hw_capabilities(self->idev_id, caps,
1931 				     IOMMU_HW_CAP_DIRTY_TRACKING);
1932 	ASSERT_EQ(IOMMU_HW_CAP_DIRTY_TRACKING,
1933 		  caps & IOMMU_HW_CAP_DIRTY_TRACKING);
1934 
1935 	test_ioctl_destroy(stddev_id);
1936 	test_ioctl_destroy(hwpt_id);
1937 }
1938 
1939 TEST_F(iommufd_dirty_tracking, get_dirty_bitmap)
1940 {
1941 	uint32_t page_size = MOCK_PAGE_SIZE;
1942 	uint32_t hwpt_id;
1943 	uint32_t ioas_id;
1944 
1945 	if (variant->hugepages)
1946 		page_size = MOCK_HUGE_PAGE_SIZE;
1947 
1948 	test_ioctl_ioas_alloc(&ioas_id);
1949 	test_ioctl_ioas_map_fixed_id(ioas_id, self->buffer,
1950 				     variant->buffer_size, MOCK_APERTURE_START);
1951 
1952 	test_cmd_hwpt_alloc(self->idev_id, ioas_id,
1953 			    IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
1954 
1955 	test_cmd_set_dirty_tracking(hwpt_id, true);
1956 
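	/* PAGE_SIZE aligned bitmap */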
1957 	test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
1958 				MOCK_APERTURE_START, self->page_size, page_size,
1959 				self->bitmap, self->bitmap_size, 0, _metadata);
1960 
1961 	/* PAGE_SIZE unaligned bitmap */
1962 	test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
1963 				MOCK_APERTURE_START, self->page_size, page_size,
1964 				self->bitmap + MOCK_PAGE_SIZE,
1965 				self->bitmap_size, 0, _metadata);
1966 
1967 	/* u64 unaligned bitmap */
1968 	test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
1969 				MOCK_APERTURE_START, self->page_size, page_size,
1970 				self->bitmap + 0xff1, self->bitmap_size, 0,
1971 				_metadata);
1972 
1973 	test_ioctl_destroy(hwpt_id);
1974 }
1975 
1976 TEST_F(iommufd_dirty_tracking, get_dirty_bitmap_no_clear)
1977 {
1978 	uint32_t page_size = MOCK_PAGE_SIZE;
1979 	uint32_t hwpt_id;
1980 	uint32_t ioas_id;
1981 
1982 	if (variant->hugepages)
1983 		page_size = MOCK_HUGE_PAGE_SIZE;
1984 
1985 	test_ioctl_ioas_alloc(&ioas_id);
1986 	test_ioctl_ioas_map_fixed_id(ioas_id, self->buffer,
1987 				     variant->buffer_size, MOCK_APERTURE_START);
1988 
1989 	test_cmd_hwpt_alloc(self->idev_id, ioas_id,
1990 			    IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
1991 
1992 	test_cmd_set_dirty_tracking(hwpt_id, true);
1993 
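	/* PAGE_SIZE aligned bitmap, read with NO_CLEAR so dirty bits are preserved */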
1994 	test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
1995 				MOCK_APERTURE_START, self->page_size, page_size,
1996 				self->bitmap, self->bitmap_size,
1997 				IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR,
1998 				_metadata);
1999 
2000 	/* PAGE_SIZE unaligned bitmap */
2001 	test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
2002 				MOCK_APERTURE_START, self->page_size, page_size,
2003 				self->bitmap + MOCK_PAGE_SIZE,
2004 				self->bitmap_size,
2005 				IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR,
2006 				_metadata);
2007 
2008 	/* u64 unaligned bitmap */
2009 	test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
2010 				MOCK_APERTURE_START, self->page_size, page_size,
2011 				self->bitmap + 0xff1, self->bitmap_size,
2012 				IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR,
2013 				_metadata);
2014 
2015 	test_ioctl_destroy(hwpt_id);
2016 }
2017 
2018 /* VFIO compatibility IOCTLs */
2019 
2020 TEST_F(iommufd, simple_ioctls)
2021 {
2022 	ASSERT_EQ(VFIO_API_VERSION, ioctl(self->fd, VFIO_GET_API_VERSION));
2023 	ASSERT_EQ(1, ioctl(self->fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU));
2024 }
2025 
2026 TEST_F(iommufd, unmap_cmd)
2027 {
2028 	struct vfio_iommu_type1_dma_unmap unmap_cmd = {
2029 		.iova = MOCK_APERTURE_START,
2030 		.size = PAGE_SIZE,
2031 	};
2032 
2033 	unmap_cmd.argsz = 1;
2034 	EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
2035 
2036 	unmap_cmd.argsz = sizeof(unmap_cmd);
2037 	unmap_cmd.flags = 1 << 31;
2038 	EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
2039 
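	/* Valid arguments, but ENODEV while no compat ioas is set */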
2040 	unmap_cmd.flags = 0;
2041 	EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
2042 }
2043 
2044 TEST_F(iommufd, map_cmd)
2045 {
2046 	struct vfio_iommu_type1_dma_map map_cmd = {
2047 		.iova = MOCK_APERTURE_START,
2048 		.size = PAGE_SIZE,
2049 		.vaddr = (__u64)buffer,
2050 	};
2051 
2052 	map_cmd.argsz = 1;
2053 	EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
2054 
2055 	map_cmd.argsz = sizeof(map_cmd);
2056 	map_cmd.flags = 1 << 31;
2057 	EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
2058 
2059 	/* Requires a domain to be attached */
2060 	map_cmd.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
2061 	EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
2062 }
2063 
2064 TEST_F(iommufd, info_cmd)
2065 {
2066 	struct vfio_iommu_type1_info info_cmd = {};
2067 
2068 	/* Invalid argsz */
2069 	info_cmd.argsz = 1;
2070 	EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_GET_INFO, &info_cmd));
2071 
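	/* Valid argsz, still ENODEV while no compat ioas is set */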
2072 	info_cmd.argsz = sizeof(info_cmd);
2073 	EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_IOMMU_GET_INFO, &info_cmd));
2074 }
2075 
2076 TEST_F(iommufd, set_iommu_cmd)
2077 {
2078 	/* Requires a domain to be attached */
2079 	EXPECT_ERRNO(ENODEV,
2080 		     ioctl(self->fd, VFIO_SET_IOMMU, VFIO_TYPE1v2_IOMMU));
2081 	EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU));
2082 }
2083 
2084 TEST_F(iommufd, vfio_ioas)
2085 {
2086 	struct iommu_vfio_ioas vfio_ioas_cmd = {
2087 		.size = sizeof(vfio_ioas_cmd),
2088 		.op = IOMMU_VFIO_IOAS_GET,
2089 	};
2090 	__u32 ioas_id;
2091 
2092 	/* ENODEV if there is no compat ioas */
2093 	EXPECT_ERRNO(ENODEV, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2094 
2095 	/* Invalid id for set */
2096 	vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_SET;
2097 	EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2098 
2099 	/* Valid id for set */
2100 	test_ioctl_ioas_alloc(&ioas_id);
2101 	vfio_ioas_cmd.ioas_id = ioas_id;
2102 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2103 
2104 	/* Same id comes back from get */
2105 	vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_GET;
2106 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2107 	ASSERT_EQ(ioas_id, vfio_ioas_cmd.ioas_id);
2108 
2109 	/* Clear works */
2110 	vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_CLEAR;
2111 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2112 	vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_GET;
2113 	EXPECT_ERRNO(ENODEV, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2114 }
2115 
2116 FIXTURE(vfio_compat_mock_domain)
2117 {
2118 	int fd;
2119 	uint32_t ioas_id;
2120 };
2121 
2122 FIXTURE_VARIANT(vfio_compat_mock_domain)
2123 {
2124 	unsigned int version;
2125 };
2126 
2127 FIXTURE_SETUP(vfio_compat_mock_domain)
2128 {
2129 	struct iommu_vfio_ioas vfio_ioas_cmd = {
2130 		.size = sizeof(vfio_ioas_cmd),
2131 		.op = IOMMU_VFIO_IOAS_SET,
2132 	};
2133 
2134 	self->fd = open("/dev/iommu", O_RDWR);
2135 	ASSERT_NE(-1, self->fd);
2136 
2137 	/* Create what VFIO would consider a group */
2138 	test_ioctl_ioas_alloc(&self->ioas_id);
2139 	test_cmd_mock_domain(self->ioas_id, NULL, NULL, NULL);
2140 
2141 	/* Attach it to the vfio compat */
2142 	vfio_ioas_cmd.ioas_id = self->ioas_id;
2143 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2144 	ASSERT_EQ(0, ioctl(self->fd, VFIO_SET_IOMMU, variant->version));
2145 }
2146 
2147 FIXTURE_TEARDOWN(vfio_compat_mock_domain)
2148 {
2149 	teardown_iommufd(self->fd, _metadata);
2150 }
2151 
2152 FIXTURE_VARIANT_ADD(vfio_compat_mock_domain, Ver1v2)
2153 {
2154 	.version = VFIO_TYPE1v2_IOMMU,
2155 };
2156 
2157 FIXTURE_VARIANT_ADD(vfio_compat_mock_domain, Ver1v0)
2158 {
2159 	.version = VFIO_TYPE1_IOMMU,
2160 };
2161 
2162 TEST_F(vfio_compat_mock_domain, simple_close)
2163 {
2164 }
2165 
2166 TEST_F(vfio_compat_mock_domain, option_huge_pages)
2167 {
2168 	struct iommu_option cmd = {
2169 		.size = sizeof(cmd),
2170 		.option_id = IOMMU_OPTION_HUGE_PAGES,
2171 		.op = IOMMU_OPTION_OP_GET,
2172 		.val64 = 3,
2173 		.object_id = self->ioas_id,
2174 	};
2175 
2176 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
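	/* Type1 compat clears IOMMU_OPTION_HUGE_PAGES on the IOAS, type1v2 leaves it set */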
2177 	if (variant->version == VFIO_TYPE1_IOMMU) {
2178 		ASSERT_EQ(0, cmd.val64);
2179 	} else {
2180 		ASSERT_EQ(1, cmd.val64);
2181 	}
2182 }
2183 
2184 /*
2185  * Execute an ioctl command stored in buffer and check that the result does not
2186  * overflow memory.
2187  */
2188 static bool is_filled(const void *buf, uint8_t c, size_t len)
2189 {
2190 	const uint8_t *cbuf = buf;
2191 
2192 	for (; len; cbuf++, len--)
2193 		if (*cbuf != c)
2194 			return false;
2195 	return true;
2196 }
2197 
2198 #define ioctl_check_buf(fd, cmd)                                         \
2199 	({                                                               \
2200 		size_t _cmd_len = *(__u32 *)buffer;                      \
2201 									 \
2202 		memset(buffer + _cmd_len, 0xAA, BUFFER_SIZE - _cmd_len); \
2203 		ASSERT_EQ(0, ioctl(fd, cmd, buffer));                    \
2204 		ASSERT_EQ(true, is_filled(buffer + _cmd_len, 0xAA,       \
2205 					  BUFFER_SIZE - _cmd_len));      \
2206 	})
2207 
2208 static void check_vfio_info_cap_chain(struct __test_metadata *_metadata,
2209 				      struct vfio_iommu_type1_info *info_cmd)
2210 {
2211 	const struct vfio_info_cap_header *cap;
2212 
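	/* Walk the cap chain, verifying every header stays within argsz */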
2213 	ASSERT_GE(info_cmd->argsz, info_cmd->cap_offset + sizeof(*cap));
2214 	cap = buffer + info_cmd->cap_offset;
2215 	while (true) {
2216 		size_t cap_size;
2217 
2218 		if (cap->next)
2219 			cap_size = (buffer + cap->next) - (void *)cap;
2220 		else
2221 			cap_size = (buffer + info_cmd->argsz) - (void *)cap;
2222 
2223 		switch (cap->id) {
2224 		case VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE: {
2225 			struct vfio_iommu_type1_info_cap_iova_range *data =
2226 				(void *)cap;
2227 
2228 			ASSERT_EQ(1, data->header.version);
2229 			ASSERT_EQ(1, data->nr_iovas);
2230 			EXPECT_EQ(MOCK_APERTURE_START,
2231 				  data->iova_ranges[0].start);
2232 			EXPECT_EQ(MOCK_APERTURE_LAST, data->iova_ranges[0].end);
2233 			break;
2234 		}
2235 		case VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL: {
2236 			struct vfio_iommu_type1_info_dma_avail *data =
2237 				(void *)cap;
2238 
2239 			ASSERT_EQ(1, data->header.version);
2240 			ASSERT_EQ(sizeof(*data), cap_size);
2241 			break;
2242 		}
2243 		default:
2244 			ASSERT_EQ(false, true);
2245 			break;
2246 		}
2247 		if (!cap->next)
2248 			break;
2249 
2250 		ASSERT_GE(info_cmd->argsz, cap->next + sizeof(*cap));
2251 		ASSERT_GE(buffer + cap->next, (void *)cap);
2252 		cap = buffer + cap->next;
2253 	}
2254 }
2255 
2256 TEST_F(vfio_compat_mock_domain, get_info)
2257 {
2258 	struct vfio_iommu_type1_info *info_cmd = buffer;
2259 	unsigned int i;
2260 	size_t caplen;
2261 
2262 	/* Pre-cap ABI */
2263 	*info_cmd = (struct vfio_iommu_type1_info){
2264 		.argsz = offsetof(struct vfio_iommu_type1_info, cap_offset),
2265 	};
2266 	ioctl_check_buf(self->fd, VFIO_IOMMU_GET_INFO);
2267 	ASSERT_NE(0, info_cmd->iova_pgsizes);
2268 	ASSERT_EQ(VFIO_IOMMU_INFO_PGSIZES | VFIO_IOMMU_INFO_CAPS,
2269 		  info_cmd->flags);
2270 
2271 	/* Read the cap chain size */
2272 	*info_cmd = (struct vfio_iommu_type1_info){
2273 		.argsz = sizeof(*info_cmd),
2274 	};
2275 	ioctl_check_buf(self->fd, VFIO_IOMMU_GET_INFO);
2276 	ASSERT_NE(0, info_cmd->iova_pgsizes);
2277 	ASSERT_EQ(VFIO_IOMMU_INFO_PGSIZES | VFIO_IOMMU_INFO_CAPS,
2278 		  info_cmd->flags);
2279 	ASSERT_EQ(0, info_cmd->cap_offset);
2280 	ASSERT_LT(sizeof(*info_cmd), info_cmd->argsz);
2281 
2282 	/* Read the caps; the kernel should never create a corrupted cap chain */
2283 	caplen = info_cmd->argsz;
2284 	for (i = sizeof(*info_cmd); i < caplen; i++) {
2285 		*info_cmd = (struct vfio_iommu_type1_info){
2286 			.argsz = i,
2287 		};
2288 		ioctl_check_buf(self->fd, VFIO_IOMMU_GET_INFO);
2289 		ASSERT_EQ(VFIO_IOMMU_INFO_PGSIZES | VFIO_IOMMU_INFO_CAPS,
2290 			  info_cmd->flags);
2291 		if (!info_cmd->cap_offset)
2292 			continue;
2293 		check_vfio_info_cap_chain(_metadata, info_cmd);
2294 	}
2295 }
2296 
2297 static void shuffle_array(unsigned long *array, size_t nelms)
2298 {
2299 	unsigned int i;
2300 
2301 	/* Shuffle */
2302 	for (i = 0; i != nelms; i++) {
2303 		unsigned long tmp = array[i];
2304 		unsigned int other = rand() % (nelms - i);
2305 
2306 		array[i] = array[other];
2307 		array[other] = tmp;
2308 	}
2309 }
2310 
2311 TEST_F(vfio_compat_mock_domain, map)
2312 {
2313 	struct vfio_iommu_type1_dma_map map_cmd = {
2314 		.argsz = sizeof(map_cmd),
2315 		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
2316 		.vaddr = (uintptr_t)buffer,
2317 		.size = BUFFER_SIZE,
2318 		.iova = MOCK_APERTURE_START,
2319 	};
2320 	struct vfio_iommu_type1_dma_unmap unmap_cmd = {
2321 		.argsz = sizeof(unmap_cmd),
2322 		.size = BUFFER_SIZE,
2323 		.iova = MOCK_APERTURE_START,
2324 	};
2325 	unsigned long pages_iova[BUFFER_SIZE / PAGE_SIZE];
2326 	unsigned int i;
2327 
2328 	/* Simple map/unmap */
2329 	ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
2330 	ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
2331 	ASSERT_EQ(BUFFER_SIZE, unmap_cmd.size);
2332 
2333 	/* UNMAP_FLAG_ALL requires 0 iova/size */
2334 	ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
2335 	unmap_cmd.flags = VFIO_DMA_UNMAP_FLAG_ALL;
2336 	EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
2337 
2338 	unmap_cmd.iova = 0;
2339 	unmap_cmd.size = 0;
2340 	ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
2341 	ASSERT_EQ(BUFFER_SIZE, unmap_cmd.size);
2342 
2343 	/* Small pages */
2344 	for (i = 0; i != ARRAY_SIZE(pages_iova); i++) {
2345 		map_cmd.iova = pages_iova[i] =
2346 			MOCK_APERTURE_START + i * PAGE_SIZE;
2347 		map_cmd.vaddr = (uintptr_t)buffer + i * PAGE_SIZE;
2348 		map_cmd.size = PAGE_SIZE;
2349 		ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
2350 	}
2351 	shuffle_array(pages_iova, ARRAY_SIZE(pages_iova));
2352 
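	/* Unmap the single page mappings one at a time in random order */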
2353 	unmap_cmd.flags = 0;
2354 	unmap_cmd.size = PAGE_SIZE;
2355 	for (i = 0; i != ARRAY_SIZE(pages_iova); i++) {
2356 		unmap_cmd.iova = pages_iova[i];
2357 		ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
2358 	}
2359 }
2360 
2361 TEST_F(vfio_compat_mock_domain, huge_map)
2362 {
2363 	size_t buf_size = HUGEPAGE_SIZE * 2;
2364 	struct vfio_iommu_type1_dma_map map_cmd = {
2365 		.argsz = sizeof(map_cmd),
2366 		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
2367 		.size = buf_size,
2368 		.iova = MOCK_APERTURE_START,
2369 	};
2370 	struct vfio_iommu_type1_dma_unmap unmap_cmd = {
2371 		.argsz = sizeof(unmap_cmd),
2372 	};
2373 	unsigned long pages_iova[16];
2374 	unsigned int i;
2375 	void *buf;
2376 
2377 	/* Test huge pages and splitting */
2378 	buf = mmap(0, buf_size, PROT_READ | PROT_WRITE,
2379 		   MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB | MAP_POPULATE, -1,
2380 		   0);
2381 	ASSERT_NE(MAP_FAILED, buf);
2382 	map_cmd.vaddr = (uintptr_t)buf;
2383 	ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
2384 
2385 	unmap_cmd.size = buf_size / ARRAY_SIZE(pages_iova);
2386 	for (i = 0; i != ARRAY_SIZE(pages_iova); i++)
2387 		pages_iova[i] = MOCK_APERTURE_START + (i * unmap_cmd.size);
2388 	shuffle_array(pages_iova, ARRAY_SIZE(pages_iova));
2389 
2390 	/* type1 mode can cut up larger mappings, type1v2 always fails */
2391 	for (i = 0; i != ARRAY_SIZE(pages_iova); i++) {
2392 		unmap_cmd.iova = pages_iova[i];
2393 		unmap_cmd.size = buf_size / ARRAY_SIZE(pages_iova);
2394 		if (variant->version == VFIO_TYPE1_IOMMU) {
2395 			ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA,
2396 					   &unmap_cmd));
2397 		} else {
2398 			EXPECT_ERRNO(ENOENT,
2399 				     ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA,
2400 					   &unmap_cmd));
2401 		}
2402 	}
2403 }
2404 
2405 TEST_HARNESS_MAIN
2406