// SPDX-License-Identifier: GPL-2.0
/*
 * HMM stands for Heterogeneous Memory Management. It is a helper layer inside
 * the Linux kernel that helps device drivers mirror a process address space in
 * the device. This allows the device to use the same address space, which
 * makes communication and data exchange a lot easier.
 *
 * This framework's sole purpose is to exercise various code paths inside
 * the kernel to make sure that HMM performs as expected and to flush out any
 * bugs.
 */

#include "../kselftest_harness.h"

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <unistd.h>
#include <string.h>
#include <strings.h>
#include <time.h>
#include <pthread.h>
#include <signal.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <sys/wait.h>


/*
 * This is a private UAPI to the kernel test module so it isn't exported
 * in the usual include/uapi/... directory.
 */
#include <lib/test_hmm_uapi.h>
#include <mm/gup_test.h>

struct hmm_buffer {
	void		*ptr;		/* CPU mapping of the test range */
	void		*mirror;	/* device-side copy of the range */
	unsigned long	size;
	int		fd;
	uint64_t	cpages;		/* pages touched by the last command */
	uint64_t	faults;		/* device faults taken to service it */
};

enum {
	HMM_PRIVATE_DEVICE_ONE,
	HMM_PRIVATE_DEVICE_TWO,
	HMM_COHERENCE_DEVICE_ONE,
	HMM_COHERENCE_DEVICE_TWO,
};
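
/*
 * The first two device numbers select ZONE_DEVICE private mirror devices,
 * the last two select coherent ones; hmm_is_coherent_type() below relies
 * on this ordering.
 */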

#define TWOMEG		(1 << 21)		/* 2 MiB */
#define HMM_BUFFER_SIZE (1024 << 12)		/* 4 MiB */
#define HMM_PATH_MAX    64
#define NTIMES		10

#define ALIGN(x, a) (((x) + (a - 1)) & (~((a) - 1)))
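/*
 * For a power-of-two 'a' this rounds up: ALIGN(5, 4) == 8, ALIGN(8, 4) == 8.
 * All alignments used below (page and huge page sizes) are powers of two.
 */
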
/* Just the flags we need, copied from mm.h: */

#ifndef FOLL_WRITE
#define FOLL_WRITE	0x01	/* check pte is writable */
#endif

#ifndef FOLL_LONGTERM
#define FOLL_LONGTERM   0x100 /* mapping lifetime is indefinite */
#endif

FIXTURE(hmm)
{
	int		fd;
	unsigned int	page_size;
	unsigned int	page_shift;
};

FIXTURE_VARIANT(hmm)
{
	int     device_number;
};

FIXTURE_VARIANT_ADD(hmm, hmm_device_private)
{
	.device_number = HMM_PRIVATE_DEVICE_ONE,
};

FIXTURE_VARIANT_ADD(hmm, hmm_device_coherent)
{
	.device_number = HMM_COHERENCE_DEVICE_ONE,
};

FIXTURE(hmm2)
{
	int		fd0;
	int		fd1;
	unsigned int	page_size;
	unsigned int	page_shift;
};

FIXTURE_VARIANT(hmm2)
{
	int     device_number0;
	int     device_number1;
};

FIXTURE_VARIANT_ADD(hmm2, hmm2_device_private)
{
	.device_number0 = HMM_PRIVATE_DEVICE_ONE,
	.device_number1 = HMM_PRIVATE_DEVICE_TWO,
};

FIXTURE_VARIANT_ADD(hmm2, hmm2_device_coherent)
{
	.device_number0 = HMM_COHERENCE_DEVICE_ONE,
	.device_number1 = HMM_COHERENCE_DEVICE_TWO,
};

static int hmm_open(int unit)
{
	char pathname[HMM_PATH_MAX];
	int fd;

	snprintf(pathname, sizeof(pathname), "/dev/hmm_dmirror%d", unit);
	fd = open(pathname, O_RDWR, 0);
	if (fd < 0)
		fprintf(stderr, "could not open hmm dmirror driver (%s)\n",
			pathname);
	return fd;
}
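
/*
 * The /dev/hmm_dmirror* nodes are created by the test_hmm kernel module
 * (lib/test_hmm.c); if that module is not loaded, every open() above fails
 * and the fixture setup below aborts or skips.
 */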

static bool hmm_is_coherent_type(int dev_num)
{
	return (dev_num >= HMM_COHERENCE_DEVICE_ONE);
}

FIXTURE_SETUP(hmm)
{
	self->page_size = sysconf(_SC_PAGE_SIZE);
	self->page_shift = ffs(self->page_size) - 1;

	self->fd = hmm_open(variant->device_number);
	if (self->fd < 0 && hmm_is_coherent_type(variant->device_number))
		SKIP(exit(0), "DEVICE_COHERENT not available");
	ASSERT_GE(self->fd, 0);
}

FIXTURE_SETUP(hmm2)
{
	self->page_size = sysconf(_SC_PAGE_SIZE);
	self->page_shift = ffs(self->page_size) - 1;

	self->fd0 = hmm_open(variant->device_number0);
	if (self->fd0 < 0 && hmm_is_coherent_type(variant->device_number0))
		SKIP(exit(0), "DEVICE_COHERENT not available");
	ASSERT_GE(self->fd0, 0);
	self->fd1 = hmm_open(variant->device_number1);
	ASSERT_GE(self->fd1, 0);
}

FIXTURE_TEARDOWN(hmm)
{
	int ret = close(self->fd);

	ASSERT_EQ(ret, 0);
	self->fd = -1;
}

FIXTURE_TEARDOWN(hmm2)
{
	int ret = close(self->fd0);

	ASSERT_EQ(ret, 0);
	self->fd0 = -1;

	ret = close(self->fd1);
	ASSERT_EQ(ret, 0);
	self->fd1 = -1;
}

static int hmm_dmirror_cmd(int fd,
			   unsigned long request,
			   struct hmm_buffer *buffer,
			   unsigned long npages)
{
	struct hmm_dmirror_cmd cmd;
	int ret;

	/* Simulate a device reading system memory. */
	cmd.addr = (__u64)buffer->ptr;
	cmd.ptr = (__u64)buffer->mirror;
	cmd.npages = npages;

	for (;;) {
		ret = ioctl(fd, request, &cmd);
		if (ret == 0)
			break;
		if (errno == EINTR)
			continue;
		return -errno;
	}
	buffer->cpages = cmd.cpages;
	buffer->faults = cmd.faults;

	return 0;
}
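
/*
 * Typical call pattern used throughout this file:
 *
 *	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, npages);
 *	ASSERT_EQ(ret, 0);
 *	ASSERT_EQ(buffer->cpages, npages);
 *
 * On success, buffer->cpages is the number of pages the command touched
 * and buffer->faults is the number of device faults taken to service it.
 */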

static void hmm_buffer_free(struct hmm_buffer *buffer)
{
	if (buffer == NULL)
		return;

	if (buffer->ptr)
		munmap(buffer->ptr, buffer->size);
	free(buffer->mirror);
	free(buffer);
}

/*
 * Create a temporary file that will be deleted on close.
 */
static int hmm_create_file(unsigned long size)
{
	char path[HMM_PATH_MAX];
	int fd;

	strcpy(path, "/tmp");
	fd = open(path, O_TMPFILE | O_EXCL | O_RDWR, 0600);
	if (fd >= 0) {
		int r;

		do {
			r = ftruncate(fd, size);
		} while (r == -1 && errno == EINTR);
		if (!r)
			return fd;
		close(fd);
	}
	return -1;
}
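
/*
 * O_TMPFILE creates an unnamed file in the given directory; O_EXCL on top
 * prevents it from ever being linked into the filesystem, so the file is
 * gone as soon as the last descriptor is closed.
 */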

/*
 * Return a random unsigned number.
 */
static unsigned int hmm_random(void)
{
	static int fd = -1;
	unsigned int r;

	if (fd < 0) {
		fd = open("/dev/urandom", O_RDONLY);
		if (fd < 0) {
			fprintf(stderr, "%s:%d failed to open /dev/urandom\n",
					__FILE__, __LINE__);
			return ~0U;
		}
	}
	read(fd, &r, sizeof(r));
	return r;
}

static void hmm_nanosleep(unsigned int n)
{
	struct timespec t;

	t.tv_sec = 0;
	t.tv_nsec = n;
	nanosleep(&t, NULL);
}

static int hmm_migrate_sys_to_dev(int fd,
				   struct hmm_buffer *buffer,
				   unsigned long npages)
{
	return hmm_dmirror_cmd(fd, HMM_DMIRROR_MIGRATE_TO_DEV, buffer, npages);
}

static int hmm_migrate_dev_to_sys(int fd,
				   struct hmm_buffer *buffer,
				   unsigned long npages)
{
	return hmm_dmirror_cmd(fd, HMM_DMIRROR_MIGRATE_TO_SYS, buffer, npages);
}

/*
 * Simple NULL test of device open/close.
 */
TEST_F(hmm, open_close)
{
}

/*
 * Read private anonymous memory.
 */
TEST_F(hmm, anon_read)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;
	int val;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/*
	 * Initialize buffer in system memory but leave the first two pages
	 * zero (pte_none and pfn_zero).
	 */
	i = 2 * self->page_size / sizeof(*ptr);
	for (ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Set buffer permission to read-only. */
	ret = mprotect(buffer->ptr, size, PROT_READ);
	ASSERT_EQ(ret, 0);

	/* Populate the CPU page table with a special zero page. */
	val = *(int *)(buffer->ptr + self->page_size);
	ASSERT_EQ(val, 0);

	/* Simulate a device reading system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device read. */
	ptr = buffer->mirror;
	for (i = 0; i < 2 * self->page_size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], 0);
	for (; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}

/*
 * Read private anonymous memory which has been protected with
 * mprotect() PROT_NONE.
 */
TEST_F(hmm, anon_read_prot)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer in system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Initialize mirror buffer so we can verify it isn't written. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = -i;

	/* Protect buffer from reading. */
	ret = mprotect(buffer->ptr, size, PROT_NONE);
	ASSERT_EQ(ret, 0);

	/* Simulate a device reading system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, npages);
	ASSERT_EQ(ret, -EFAULT);

	/* Allow CPU to read the buffer so we can check it. */
	ret = mprotect(buffer->ptr, size, PROT_READ);
	ASSERT_EQ(ret, 0);
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	/* Check what the device read. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], -i);

	hmm_buffer_free(buffer);
}

/*
 * Write private anonymous memory.
 */
TEST_F(hmm, anon_write)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize data that the device will write to buffer->ptr. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}

/*
 * Write private anonymous memory which has been protected with
 * mprotect() PROT_READ.
 */
TEST_F(hmm, anon_write_prot)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Simulate a device reading a zero page of memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, 1);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, 1);
	ASSERT_EQ(buffer->faults, 1);

	/* Initialize data that the device will write to buffer->ptr. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, -EPERM);

	/* Check what the device wrote. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], 0);

	/* Now allow writing and see that the zero page is replaced. */
	ret = mprotect(buffer->ptr, size, PROT_WRITE | PROT_READ);
	ASSERT_EQ(ret, 0);

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}

/*
 * Check that a device writing an anonymous private mapping
 * will copy-on-write if a child process inherits the mapping.
 */
TEST_F(hmm, anon_write_child)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	pid_t pid;
	int child_fd;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer->ptr so we can tell if it is written. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Initialize data that the device will write to buffer->ptr. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = -i;

	pid = fork();
	if (pid == -1)
		ASSERT_EQ(pid, 0);
	if (pid != 0) {
		waitpid(pid, &ret, 0);
		ASSERT_EQ(WIFEXITED(ret), 1);

		/* Check that the parent's buffer did not change. */
		for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
			ASSERT_EQ(ptr[i], i);
		return;
	}

	/* Check that we see the parent's values. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], -i);

	/* The child process needs its own mirror to its own mm. */
	child_fd = hmm_open(0);
	ASSERT_GE(child_fd, 0);

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(child_fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], -i);

	close(child_fd);
	exit(0);
}

/*
 * Check that a device writing an anonymous shared mapping
 * will not copy-on-write if a child process inherits the mapping.
 */
TEST_F(hmm, anon_write_child_shared)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	pid_t pid;
	int child_fd;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_SHARED | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer->ptr so we can tell if it is written. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Initialize data that the device will write to buffer->ptr. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = -i;

	pid = fork();
	if (pid == -1)
		ASSERT_EQ(pid, 0);
	if (pid != 0) {
		waitpid(pid, &ret, 0);
		ASSERT_EQ(WIFEXITED(ret), 1);

		/* Check that the parent's buffer did change. */
		for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
			ASSERT_EQ(ptr[i], -i);
		return;
	}

	/* Check that we see the parent's values. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], -i);

	/* The child process needs its own mirror to its own mm. */
	child_fd = hmm_open(0);
	ASSERT_GE(child_fd, 0);

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(child_fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], -i);

	close(child_fd);
	exit(0);
}

/*
 * Write private anonymous huge page.
 */
TEST_F(hmm, anon_write_huge)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	void *old_ptr;
	void *map;
	int *ptr;
	int ret;

	size = 2 * TWOMEG;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	size = TWOMEG;
	npages = size >> self->page_shift;
	map = (void *)ALIGN((uintptr_t)buffer->ptr, size);
	ret = madvise(map, size, MADV_HUGEPAGE);
	ASSERT_EQ(ret, 0);
	old_ptr = buffer->ptr;
	buffer->ptr = map;

	/* Initialize data that the device will write to buffer->ptr. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	buffer->ptr = old_ptr;
	hmm_buffer_free(buffer);
}

/*
 * Read numeric data from raw and tagged kernel status files.  Used to read
 * /proc and /sys data (without a tag) and /proc/meminfo data (with a tag).
 */
static long file_read_ulong(char *file, const char *tag)
{
	int fd;
	char buf[2048];
	int len;
	char *p, *q;
	long val;

	fd = open(file, O_RDONLY);
	if (fd < 0) {
		/* Error opening the file */
		return -1;
	}

	len = read(fd, buf, sizeof(buf));
	close(fd);
	if (len < 0) {
		/* Error in reading the file */
		return -1;
	}
	if (len == sizeof(buf)) {
		/* Error file is too large */
		return -1;
	}
	buf[len] = '\0';

	/* Search for a tag if provided */
	if (tag) {
		p = strstr(buf, tag);
		if (!p)
			return -1; /* looks like the line we want isn't there */
		p += strlen(tag);
	} else
		p = buf;

	val = strtol(p, &q, 0);
	if (*q != ' ') {
		/* Error parsing the file */
		return -1;
	}

	return val;
}
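
/*
 * Example: file_read_ulong("/proc/meminfo", "Hugepagesize:") returns the
 * default huge page size in kB, or -1 on any error; callers below convert
 * it to bytes.
 */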

/*
 * Write huge TLBFS page.
 */
TEST_F(hmm, anon_write_hugetlbfs)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long default_hsize;
	unsigned long i;
	int *ptr;
	int ret;

	default_hsize = file_read_ulong("/proc/meminfo", "Hugepagesize:");
	if (default_hsize < 0 || default_hsize*1024 < default_hsize)
		SKIP(return, "Huge page size could not be determined");
	default_hsize = default_hsize*1024; /* KB to B */

	size = ALIGN(TWOMEG, default_hsize);
	npages = size >> self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->ptr = mmap(NULL, size,
				   PROT_READ | PROT_WRITE,
				   MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,
				   -1, 0);
	if (buffer->ptr == MAP_FAILED) {
		free(buffer);
		SKIP(return, "Huge page could not be allocated");
	}

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	/* Initialize data that the device will write to buffer->ptr. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	munmap(buffer->ptr, buffer->size);
	buffer->ptr = NULL;
	hmm_buffer_free(buffer);
}

/*
 * Read mmap'ed file memory.
 */
TEST_F(hmm, file_read)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;
	int fd;
	ssize_t len;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	fd = hmm_create_file(size);
	ASSERT_GE(fd, 0);

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = fd;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	/* Write initial contents of the file. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;
	len = pwrite(fd, buffer->mirror, size, 0);
	ASSERT_EQ(len, size);
	memset(buffer->mirror, 0, size);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ,
			   MAP_SHARED,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Simulate a device reading system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device read. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}

/*
 * Write mmap'ed file memory.
 */
TEST_F(hmm, file_write)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;
	int fd;
	ssize_t len;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	fd = hmm_create_file(size);
	ASSERT_GE(fd, 0);

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = fd;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_SHARED,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize data that the device will write to buffer->ptr. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	/* Check that the device also wrote the file. */
	len = pread(fd, buffer->mirror, size, 0);
	ASSERT_EQ(len, size);
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}

/*
 * Migrate anonymous memory to device private memory.
 */
TEST_F(hmm, migrate)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer in system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Migrate memory to device. */
	ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Check what the device read. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}

/*
 * Migrate anonymous memory to device private memory and fault some of it back
 * to system memory, then try migrating the resulting mix of system and device
 * private memory to the device.
 */
TEST_F(hmm, migrate_fault)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer in system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Migrate memory to device. */
	ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Check what the device read. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	/* Fault half the pages back to system memory and check them. */
	for (i = 0, ptr = buffer->ptr; i < size / (2 * sizeof(*ptr)); ++i)
		ASSERT_EQ(ptr[i], i);

	/* Migrate memory to the device again. */
	ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Check what the device read. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}

TEST_F(hmm, migrate_release)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS, buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer in system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Migrate memory to device. */
	ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Check what the device read. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	/* Release device memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_RELEASE, buffer, npages);
	ASSERT_EQ(ret, 0);

	/* Fault pages back to system memory and check them. */
	for (i = 0, ptr = buffer->ptr; i < size / (2 * sizeof(*ptr)); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}

/*
 * Migrate anonymous shared memory to device private memory.
 */
TEST_F(hmm, migrate_shared)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_SHARED | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Migrate memory to device. */
	ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
	ASSERT_EQ(ret, -ENOENT);

	hmm_buffer_free(buffer);
}

/*
 * Try to migrate various memory types to device private memory.
 */
TEST_F(hmm2, migrate_mixed)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	int *ptr;
	unsigned char *p;
	int ret;
	int val;

	npages = 6;
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	/* Reserve a range of addresses. */
	buffer->ptr = mmap(NULL, size,
			   PROT_NONE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);
	p = buffer->ptr;

	/* Migrating a protected area should be an error. */
	ret = hmm_migrate_sys_to_dev(self->fd1, buffer, npages);
	ASSERT_EQ(ret, -EINVAL);

	/* Punch a hole after the first page address. */
	ret = munmap(buffer->ptr + self->page_size, self->page_size);
	ASSERT_EQ(ret, 0);

	/* We expect an error if the vma doesn't cover the range. */
	ret = hmm_migrate_sys_to_dev(self->fd1, buffer, 3);
	ASSERT_EQ(ret, -EINVAL);

	/* Page 2 will be a read-only zero page. */
	ret = mprotect(buffer->ptr + 2 * self->page_size, self->page_size,
				PROT_READ);
	ASSERT_EQ(ret, 0);
	ptr = (int *)(buffer->ptr + 2 * self->page_size);
	val = *ptr + 3;
	ASSERT_EQ(val, 3);

	/* Page 3 will be read-only. */
	ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,
				PROT_READ | PROT_WRITE);
	ASSERT_EQ(ret, 0);
	ptr = (int *)(buffer->ptr + 3 * self->page_size);
	*ptr = val;
	ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,
				PROT_READ);
	ASSERT_EQ(ret, 0);

	/* Pages 4-5 will be read-write. */
	ret = mprotect(buffer->ptr + 4 * self->page_size, 2 * self->page_size,
				PROT_READ | PROT_WRITE);
	ASSERT_EQ(ret, 0);
	ptr = (int *)(buffer->ptr + 4 * self->page_size);
	*ptr = val;
	ptr = (int *)(buffer->ptr + 5 * self->page_size);
	*ptr = val;

	/* Now try to migrate pages 2-5 to device 1. */
	buffer->ptr = p + 2 * self->page_size;
	ret = hmm_migrate_sys_to_dev(self->fd1, buffer, 4);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, 4);

	/* Page 5 won't be migrated to device 0 because it's on device 1. */
	buffer->ptr = p + 5 * self->page_size;
	ret = hmm_migrate_sys_to_dev(self->fd0, buffer, 1);
	ASSERT_EQ(ret, -ENOENT);
	buffer->ptr = p;

	hmm_buffer_free(buffer);
}

/*
 * Migrate anonymous memory to device memory and back to system memory
 * multiple times. With a device private zone this happens implicitly, by
 * CPU-faulting the pages back; with a device coherent zone the pages must
 * be migrated back to system memory explicitly, because the CPU can access
 * coherent device memory directly and therefore never takes a page fault.
 */
TEST_F(hmm, migrate_multiple)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	unsigned long c;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	for (c = 0; c < NTIMES; c++) {
		buffer = malloc(sizeof(*buffer));
		ASSERT_NE(buffer, NULL);

		buffer->fd = -1;
		buffer->size = size;
		buffer->mirror = malloc(size);
		ASSERT_NE(buffer->mirror, NULL);

		buffer->ptr = mmap(NULL, size,
				   PROT_READ | PROT_WRITE,
				   MAP_PRIVATE | MAP_ANONYMOUS,
				   buffer->fd, 0);
		ASSERT_NE(buffer->ptr, MAP_FAILED);

		/* Initialize buffer in system memory. */
		for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
			ptr[i] = i;

		/* Migrate memory to device. */
		ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
		ASSERT_EQ(ret, 0);
		ASSERT_EQ(buffer->cpages, npages);

		/* Check what the device read. */
		for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
			ASSERT_EQ(ptr[i], i);

		/* Migrate back to system memory and check them. */
		if (hmm_is_coherent_type(variant->device_number)) {
			ret = hmm_migrate_dev_to_sys(self->fd, buffer, npages);
			ASSERT_EQ(ret, 0);
			ASSERT_EQ(buffer->cpages, npages);
		}

		for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
			ASSERT_EQ(ptr[i], i);

		hmm_buffer_free(buffer);
	}
}

/*
 * Read anonymous memory multiple times.
 */
TEST_F(hmm, anon_read_multiple)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	unsigned long c;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	for (c = 0; c < NTIMES; c++) {
		buffer = malloc(sizeof(*buffer));
		ASSERT_NE(buffer, NULL);

		buffer->fd = -1;
		buffer->size = size;
		buffer->mirror = malloc(size);
		ASSERT_NE(buffer->mirror, NULL);

		buffer->ptr = mmap(NULL, size,
				   PROT_READ | PROT_WRITE,
				   MAP_PRIVATE | MAP_ANONYMOUS,
				   buffer->fd, 0);
		ASSERT_NE(buffer->ptr, MAP_FAILED);

		/* Initialize buffer in system memory. */
		for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
			ptr[i] = i + c;

		/* Simulate a device reading system memory. */
		ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer,
				      npages);
		ASSERT_EQ(ret, 0);
		ASSERT_EQ(buffer->cpages, npages);
		ASSERT_EQ(buffer->faults, 1);

		/* Check what the device read. */
		for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
			ASSERT_EQ(ptr[i], i + c);

		hmm_buffer_free(buffer);
	}
}

void *unmap_buffer(void *p)
{
	struct hmm_buffer *buffer = p;

	/* Delay for a bit and then unmap buffer while it is being read. */
	hmm_nanosleep(hmm_random() % 32000);
	munmap(buffer->ptr + buffer->size / 2, buffer->size / 2);
	buffer->ptr = NULL;

	return NULL;
}

/*
 * Try reading anonymous memory while it is being unmapped.
 */
TEST_F(hmm, anon_teardown)
{
	unsigned long npages;
	unsigned long size;
	unsigned long c;
	void *ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	for (c = 0; c < NTIMES; ++c) {
		pthread_t thread;
		struct hmm_buffer *buffer;
		unsigned long i;
		int *ptr;
		int rc;

		buffer = malloc(sizeof(*buffer));
		ASSERT_NE(buffer, NULL);

		buffer->fd = -1;
		buffer->size = size;
		buffer->mirror = malloc(size);
		ASSERT_NE(buffer->mirror, NULL);

		buffer->ptr = mmap(NULL, size,
				   PROT_READ | PROT_WRITE,
				   MAP_PRIVATE | MAP_ANONYMOUS,
				   buffer->fd, 0);
		ASSERT_NE(buffer->ptr, MAP_FAILED);

		/* Initialize buffer in system memory. */
		for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
			ptr[i] = i + c;

		rc = pthread_create(&thread, NULL, unmap_buffer, buffer);
		ASSERT_EQ(rc, 0);

		/* Simulate a device reading system memory. */
		rc = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer,
				     npages);
		if (rc == 0) {
			ASSERT_EQ(buffer->cpages, npages);
			ASSERT_EQ(buffer->faults, 1);

			/* Check what the device read. */
			for (i = 0, ptr = buffer->mirror;
			     i < size / sizeof(*ptr);
			     ++i)
				ASSERT_EQ(ptr[i], i + c);
		}

		pthread_join(thread, &ret);
		hmm_buffer_free(buffer);
	}
}

/*
 * Test memory snapshot without faulting in pages accessed by the device.
 */
TEST_F(hmm, mixedmap)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned char *m;
	int ret;

	npages = 1;
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(npages);
	ASSERT_NE(buffer->mirror, NULL);

	/* Reserve a range of addresses. */
	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE,
			   self->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Simulate a device snapshotting CPU pagetables. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Check what the device saw. */
	m = buffer->mirror;
	ASSERT_EQ(m[0], HMM_DMIRROR_PROT_READ);

	hmm_buffer_free(buffer);
}

/*
 * Test memory snapshot without faulting in pages accessed by the device.
 */
TEST_F(hmm2, snapshot)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	int *ptr;
	unsigned char *p;
	unsigned char *m;
	int ret;
	int val;

	npages = 7;
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(npages);
	ASSERT_NE(buffer->mirror, NULL);

	/* Reserve a range of addresses. */
	buffer->ptr = mmap(NULL, size,
			   PROT_NONE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);
	p = buffer->ptr;

	/* Punch a hole after the first page address. */
	ret = munmap(buffer->ptr + self->page_size, self->page_size);
	ASSERT_EQ(ret, 0);

	/* Page 2 will be a read-only zero page. */
	ret = mprotect(buffer->ptr + 2 * self->page_size, self->page_size,
				PROT_READ);
	ASSERT_EQ(ret, 0);
	ptr = (int *)(buffer->ptr + 2 * self->page_size);
	val = *ptr + 3;
	ASSERT_EQ(val, 3);

	/* Page 3 will be read-only. */
	ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,
				PROT_READ | PROT_WRITE);
	ASSERT_EQ(ret, 0);
	ptr = (int *)(buffer->ptr + 3 * self->page_size);
	*ptr = val;
	ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,
				PROT_READ);
	ASSERT_EQ(ret, 0);

	/* Pages 4-6 will be read-write. */
	ret = mprotect(buffer->ptr + 4 * self->page_size, 3 * self->page_size,
				PROT_READ | PROT_WRITE);
	ASSERT_EQ(ret, 0);
	ptr = (int *)(buffer->ptr + 4 * self->page_size);
	*ptr = val;

	/* Page 5 will be migrated to device 0. */
	buffer->ptr = p + 5 * self->page_size;
	ret = hmm_migrate_sys_to_dev(self->fd0, buffer, 1);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, 1);

	/* Page 6 will be migrated to device 1. */
	buffer->ptr = p + 6 * self->page_size;
	ret = hmm_migrate_sys_to_dev(self->fd1, buffer, 1);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, 1);

	/* Simulate a device snapshotting CPU pagetables. */
	buffer->ptr = p;
	ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_SNAPSHOT, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Check what the device saw. */
	m = buffer->mirror;
	ASSERT_EQ(m[0], HMM_DMIRROR_PROT_ERROR);
	ASSERT_EQ(m[1], HMM_DMIRROR_PROT_ERROR);
	ASSERT_EQ(m[2], HMM_DMIRROR_PROT_ZERO | HMM_DMIRROR_PROT_READ);
	ASSERT_EQ(m[3], HMM_DMIRROR_PROT_READ);
	ASSERT_EQ(m[4], HMM_DMIRROR_PROT_WRITE);
	if (!hmm_is_coherent_type(variant->device_number0)) {
		ASSERT_EQ(m[5], HMM_DMIRROR_PROT_DEV_PRIVATE_LOCAL |
				HMM_DMIRROR_PROT_WRITE);
		ASSERT_EQ(m[6], HMM_DMIRROR_PROT_NONE);
	} else {
		ASSERT_EQ(m[5], HMM_DMIRROR_PROT_DEV_COHERENT_LOCAL |
				HMM_DMIRROR_PROT_WRITE);
		ASSERT_EQ(m[6], HMM_DMIRROR_PROT_DEV_COHERENT_REMOTE |
				HMM_DMIRROR_PROT_WRITE);
	}

	hmm_buffer_free(buffer);
}

/*
 * Test the hmm_range_fault() HMM_PFN_PMD flag for large pages that
 * should be mapped by a large page table entry.
 */
TEST_F(hmm, compound)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long default_hsize;
	int *ptr;
	unsigned char *m;
	int ret;
	unsigned long i;

	/* Skip test if we can't allocate a hugetlbfs page. */

	default_hsize = file_read_ulong("/proc/meminfo", "Hugepagesize:");
	if (default_hsize < 0 || default_hsize*1024 < default_hsize)
		SKIP(return, "Huge page size could not be determined");
	default_hsize = default_hsize*1024; /* KB to B */

	size = ALIGN(TWOMEG, default_hsize);
	npages = size >> self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->ptr = mmap(NULL, size,
				   PROT_READ | PROT_WRITE,
				   MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,
				   -1, 0);
	if (buffer->ptr == MAP_FAILED) {
		free(buffer);
		SKIP(return, "Huge page could not be allocated");
	}

	buffer->size = size;
	buffer->mirror = malloc(npages);
	ASSERT_NE(buffer->mirror, NULL);

	/* Initialize the pages the device will snapshot in buffer->ptr. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Simulate a device snapshotting CPU pagetables. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Check what the device saw. */
	m = buffer->mirror;
	for (i = 0; i < npages; ++i)
		ASSERT_EQ(m[i], HMM_DMIRROR_PROT_WRITE |
				HMM_DMIRROR_PROT_PMD);

	/* Make the region read-only. */
	ret = mprotect(buffer->ptr, size, PROT_READ);
	ASSERT_EQ(ret, 0);

	/* Simulate a device snapshotting CPU pagetables. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Check what the device saw. */
	m = buffer->mirror;
	for (i = 0; i < npages; ++i)
		ASSERT_EQ(m[i], HMM_DMIRROR_PROT_READ |
				HMM_DMIRROR_PROT_PMD);

	munmap(buffer->ptr, buffer->size);
	buffer->ptr = NULL;
	hmm_buffer_free(buffer);
}

/*
 * Test two devices reading the same memory (double mapped).
 */
TEST_F(hmm2, double_map)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = 6;
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	/* The read commands below copy whole pages into the mirror. */
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	/* Reserve a range of addresses. */
	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer in system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Make region read-only. */
	ret = mprotect(buffer->ptr, size, PROT_READ);
	ASSERT_EQ(ret, 0);

	/* Simulate device 0 reading system memory. */
	ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_READ, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device read. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	/* Simulate device 1 reading system memory. */
	ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_READ, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device read. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	/* Migrate pages to device 1 and try to read from device 0. */
	ret = hmm_migrate_sys_to_dev(self->fd1, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_READ, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what device 0 read. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}

/*
 * Basic check of exclusive faulting.
 */
TEST_F(hmm, exclusive)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer in system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Map memory exclusively for device access. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_EXCLUSIVE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Check what the device read. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	/* Fault pages back to system memory and check them. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i]++, i);

	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i+1);

	/* Check atomic access revoked */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_CHECK_EXCLUSIVE, buffer, npages);
	ASSERT_EQ(ret, 0);

	hmm_buffer_free(buffer);
}

TEST_F(hmm, exclusive_mprotect)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer in system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Map memory exclusively for device access. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_EXCLUSIVE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Check what the device read. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	ret = mprotect(buffer->ptr, size, PROT_READ);
	ASSERT_EQ(ret, 0);

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, -EPERM);

	hmm_buffer_free(buffer);
}

/*
 * Check copy-on-write works.
 */
TEST_F(hmm, exclusive_cow)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer in system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Map memory exclusively for device access. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_EXCLUSIVE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	fork();

	/* Fault pages back to system memory and check them. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i]++, i);

	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i+1);

	hmm_buffer_free(buffer);
}

static int gup_test_exec(int gup_fd, unsigned long addr, int cmd,
			 int npages, int size, int flags)
{
	struct gup_test gup = {
		.nr_pages_per_call	= npages,
		.addr			= addr,
		.gup_flags		= FOLL_WRITE | flags,
		.size			= size,
	};

	if (ioctl(gup_fd, cmd, &gup)) {
		perror("gup_test ioctl failed");
		return errno;
	}

	return 0;
}

/*
 * Test get_user_pages() on device pages through gup_test, setting the
 * PIN_LONGTERM flag. This should trigger a migration back to system memory
 * for both private and coherent type pages.
 * This test makes use of the gup_test module; make sure CONFIG_GUP_TEST is
 * enabled in your kernel configuration before you run it.
 */
TEST_F(hmm, hmm_gup_test)
{
	struct hmm_buffer *buffer;
	int gup_fd;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;
	unsigned char *m;

	gup_fd = open("/sys/kernel/debug/gup_test", O_RDWR);
	if (gup_fd == -1)
		SKIP(return, "Skipping test, could not find gup_test driver");

	npages = 4;
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer in system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Migrate memory to device. */
	ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	/* Check what the device read. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	ASSERT_EQ(gup_test_exec(gup_fd,
				(unsigned long)buffer->ptr,
				GUP_BASIC_TEST, 1, self->page_size, 0), 0);
	ASSERT_EQ(gup_test_exec(gup_fd,
				(unsigned long)buffer->ptr + 1 * self->page_size,
				GUP_FAST_BENCHMARK, 1, self->page_size, 0), 0);
	ASSERT_EQ(gup_test_exec(gup_fd,
				(unsigned long)buffer->ptr + 2 * self->page_size,
				PIN_FAST_BENCHMARK, 1, self->page_size, FOLL_LONGTERM), 0);
	ASSERT_EQ(gup_test_exec(gup_fd,
				(unsigned long)buffer->ptr + 3 * self->page_size,
				PIN_LONGTERM_BENCHMARK, 1, self->page_size, 0), 0);

	/* Take snapshot to CPU pagetables */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	m = buffer->mirror;
	if (hmm_is_coherent_type(variant->device_number)) {
		ASSERT_EQ(HMM_DMIRROR_PROT_DEV_COHERENT_LOCAL | HMM_DMIRROR_PROT_WRITE, m[0]);
		ASSERT_EQ(HMM_DMIRROR_PROT_DEV_COHERENT_LOCAL | HMM_DMIRROR_PROT_WRITE, m[1]);
	} else {
		ASSERT_EQ(HMM_DMIRROR_PROT_WRITE, m[0]);
		ASSERT_EQ(HMM_DMIRROR_PROT_WRITE, m[1]);
	}
	ASSERT_EQ(HMM_DMIRROR_PROT_WRITE, m[2]);
	ASSERT_EQ(HMM_DMIRROR_PROT_WRITE, m[3]);
	/*
	 * Check again the content on the pages. Make sure there's no
	 * corrupted data.
	 */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	close(gup_fd);
	hmm_buffer_free(buffer);
}

/*
 * Test copy-on-write of device pages.
 * When writing to COW private page(s), a page fault migrates the pages
 * back to system memory first, and then they are duplicated. For COW
 * device coherent pages, the pages are duplicated directly from device
 * memory.
 */
TEST_F(hmm, hmm_cow_in_device)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;
	unsigned char *m;
	pid_t pid;
	int status;

	npages = 4;
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer in system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Migrate memory to device. */
	ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	pid = fork();
	if (pid == -1)
		ASSERT_EQ(pid, 0);
	if (!pid) {
		/* Child process spins until the parent sends SIGTERM. */
		while (1) {
		}
		fprintf(stderr, "Should not reach this\n");
		exit(0);
	}
	/*
	 * Parent process writes to the COW page(s) and gets a new copy in
	 * system memory. In case of device private pages, this write causes
	 * a migration to system memory first.
	 */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Terminate child and wait */
	EXPECT_EQ(0, kill(pid, SIGTERM));
	EXPECT_EQ(pid, waitpid(pid, &status, 0));
	EXPECT_NE(0, WIFSIGNALED(status));
	EXPECT_EQ(SIGTERM, WTERMSIG(status));

	/* Take snapshot to CPU pagetables */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	m = buffer->mirror;
	for (i = 0; i < npages; i++)
		ASSERT_EQ(HMM_DMIRROR_PROT_WRITE, m[i]);

	hmm_buffer_free(buffer);
}
TEST_HARNESS_MAIN