• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Test cases for the drm_mm range manager
4  */
5 
6 #define pr_fmt(fmt) "drm_mm: " fmt
7 
8 #include <linux/module.h>
9 #include <linux/prime_numbers.h>
10 #include <linux/slab.h>
11 #include <linux/random.h>
12 #include <linux/vmalloc.h>
13 
14 #include <drm/drm_mm.h>
15 
16 #include "../lib/drm_random.h"
17 
18 #define TESTS "drm_mm_selftests.h"
19 #include "drm_selftest.h"
20 
/* Module parameters controlling the breadth of the stress tests. */
static unsigned int random_seed;
static unsigned int max_iterations = 8192;
static unsigned int max_prime = 128;

/* Symbolic indices into insert_modes[] below. */
enum {
	BEST,
	BOTTOMUP,
	TOPDOWN,
	EVICT,
};

/*
 * Tables of drm_mm insertion strategies exercised by the tests; each
 * table is terminated by a zeroed sentinel entry (NULL name).
 */
static const struct insert_mode {
	const char *name;
	enum drm_mm_insert_mode mode;
} insert_modes[] = {
	[BEST] = { "best", DRM_MM_INSERT_BEST },
	[BOTTOMUP] = { "bottom-up", DRM_MM_INSERT_LOW },
	[TOPDOWN] = { "top-down", DRM_MM_INSERT_HIGH },
	[EVICT] = { "evict", DRM_MM_INSERT_EVICT },
	{}
}, evict_modes[] = {
	{ "bottom-up", DRM_MM_INSERT_LOW },
	{ "top-down", DRM_MM_INSERT_HIGH },
	{}
};
46 
/* Trivial check that the selftest harness itself runs; always succeeds. */
static int igt_sanitycheck(void *ignored)
{
	pr_info("%s - ok!\n", __func__);
	return 0;
}
52 
assert_no_holes(const struct drm_mm * mm)53 static bool assert_no_holes(const struct drm_mm *mm)
54 {
55 	struct drm_mm_node *hole;
56 	u64 hole_start, hole_end;
57 	unsigned long count;
58 
59 	count = 0;
60 	drm_mm_for_each_hole(hole, mm, hole_start, hole_end)
61 		count++;
62 	if (count) {
63 		pr_err("Expected to find no holes (after reserve), found %lu instead\n", count);
64 		return false;
65 	}
66 
67 	drm_mm_for_each_node(hole, mm) {
68 		if (drm_mm_hole_follows(hole)) {
69 			pr_err("Hole follows node, expected none!\n");
70 			return false;
71 		}
72 	}
73 
74 	return true;
75 }
76 
assert_one_hole(const struct drm_mm * mm,u64 start,u64 end)77 static bool assert_one_hole(const struct drm_mm *mm, u64 start, u64 end)
78 {
79 	struct drm_mm_node *hole;
80 	u64 hole_start, hole_end;
81 	unsigned long count;
82 	bool ok = true;
83 
84 	if (end <= start)
85 		return true;
86 
87 	count = 0;
88 	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
89 		if (start != hole_start || end != hole_end) {
90 			if (ok)
91 				pr_err("empty mm has incorrect hole, found (%llx, %llx), expect (%llx, %llx)\n",
92 				       hole_start, hole_end,
93 				       start, end);
94 			ok = false;
95 		}
96 		count++;
97 	}
98 	if (count != 1) {
99 		pr_err("Expected to find one hole, found %lu instead\n", count);
100 		ok = false;
101 	}
102 
103 	return ok;
104 }
105 
/*
 * Verify that @mm is packed with equally sized (@size) nodes laid out
 * back-to-back from address 0: no holes anywhere, starts strictly
 * increasing in steps of @size, and each node findable through a range
 * lookup over its own span.
 */
static bool assert_continuous(const struct drm_mm *mm, u64 size)
{
	struct drm_mm_node *node, *check, *found;
	unsigned long n;
	u64 addr;

	if (!assert_no_holes(mm))
		return false;

	n = 0;
	addr = 0;
	drm_mm_for_each_node(node, mm) {
		/* Nodes must appear in address order, tightly packed. */
		if (node->start != addr) {
			pr_err("node[%ld] list out of order, expected %llx found %llx\n",
			       n, addr, node->start);
			return false;
		}

		if (node->size != size) {
			pr_err("node[%ld].size incorrect, expected %llx, found %llx\n",
			       n, size, node->size);
			return false;
		}

		if (drm_mm_hole_follows(node)) {
			pr_err("node[%ld] is followed by a hole!\n", n);
			return false;
		}

		/* A range lookup over this span must return only this node. */
		found = NULL;
		drm_mm_for_each_node_in_range(check, mm, addr, addr + size) {
			if (node != check) {
				pr_err("lookup return wrong node, expected start %llx, found %llx\n",
				       node->start, check->start);
				return false;
			}
			found = check;
		}
		if (!found) {
			pr_err("lookup failed for node %llx + %llx\n",
			       addr, size);
			return false;
		}

		addr += size;
		n++;
	}

	return true;
}
156 
/*
 * Return node->start modulo @alignment; an alignment of zero means "no
 * constraint" and always yields 0.
 */
static u64 misalignment(struct drm_mm_node *node, u64 alignment)
{
	u64 rem = 0;

	if (alignment)
		div64_u64_rem(node->start, alignment, &rem);

	return rem;
}
167 
assert_node(struct drm_mm_node * node,struct drm_mm * mm,u64 size,u64 alignment,unsigned long color)168 static bool assert_node(struct drm_mm_node *node, struct drm_mm *mm,
169 			u64 size, u64 alignment, unsigned long color)
170 {
171 	bool ok = true;
172 
173 	if (!drm_mm_node_allocated(node) || node->mm != mm) {
174 		pr_err("node not allocated\n");
175 		ok = false;
176 	}
177 
178 	if (node->size != size) {
179 		pr_err("node has wrong size, found %llu, expected %llu\n",
180 		       node->size, size);
181 		ok = false;
182 	}
183 
184 	if (misalignment(node, alignment)) {
185 		pr_err("node is misaligned, start %llx rem %llu, expected alignment %llu\n",
186 		       node->start, misalignment(node, alignment), alignment);
187 		ok = false;
188 	}
189 
190 	if (node->color != color) {
191 		pr_err("node has wrong color, found %lu, expected %lu\n",
192 		       node->color, color);
193 		ok = false;
194 	}
195 
196 	return ok;
197 }
198 
/* Dump the contents of @mm through the debug printer, tagged with the caller. */
#define show_mm(mm) do { \
	struct drm_printer __p = drm_debug_printer(__func__); \
	drm_mm_print((mm), &__p); } while (0)
202 
/*
 * Basic init/teardown sanity: a zeroed struct drm_mm must not claim to be
 * initialised; a freshly initialised one must be clean and consist of a
 * single hole spanning the whole range; reserving then removing a node
 * covering the entire range must restore that single hole.
 */
static int igt_init(void *ignored)
{
	const unsigned int size = 4096;
	struct drm_mm mm;
	struct drm_mm_node tmp;
	int ret = -EINVAL;

	/* Start with some simple checks on initialising the struct drm_mm */
	memset(&mm, 0, sizeof(mm));
	if (drm_mm_initialized(&mm)) {
		pr_err("zeroed mm claims to be initialized\n");
		return ret;
	}

	/* Poison with 0xff: drm_mm_init() must fully overwrite the struct. */
	memset(&mm, 0xff, sizeof(mm));
	drm_mm_init(&mm, 0, size);
	if (!drm_mm_initialized(&mm)) {
		pr_err("mm claims not to be initialized\n");
		goto out;
	}

	if (!drm_mm_clean(&mm)) {
		pr_err("mm not empty on creation\n");
		goto out;
	}

	/* After creation, it should all be one massive hole */
	if (!assert_one_hole(&mm, 0, size)) {
		ret = -EINVAL;
		goto out;
	}

	memset(&tmp, 0, sizeof(tmp));
	tmp.start = 0;
	tmp.size = size;
	ret = drm_mm_reserve_node(&mm, &tmp);
	if (ret) {
		pr_err("failed to reserve whole drm_mm\n");
		goto out;
	}

	/* After filling the range entirely, there should be no holes */
	if (!assert_no_holes(&mm)) {
		ret = -EINVAL;
		goto out;
	}

	/* And then after emptying it again, the massive hole should be back */
	drm_mm_remove_node(&tmp);
	if (!assert_one_hole(&mm, 0, size)) {
		ret = -EINVAL;
		goto out;
	}

out:
	if (ret)
		show_mm(&mm);
	drm_mm_takedown(&mm);
	return ret;
}
263 
igt_debug(void * ignored)264 static int igt_debug(void *ignored)
265 {
266 	struct drm_mm mm;
267 	struct drm_mm_node nodes[2];
268 	int ret;
269 
270 	/* Create a small drm_mm with a couple of nodes and a few holes, and
271 	 * check that the debug iterator doesn't explode over a trivial drm_mm.
272 	 */
273 
274 	drm_mm_init(&mm, 0, 4096);
275 
276 	memset(nodes, 0, sizeof(nodes));
277 	nodes[0].start = 512;
278 	nodes[0].size = 1024;
279 	ret = drm_mm_reserve_node(&mm, &nodes[0]);
280 	if (ret) {
281 		pr_err("failed to reserve node[0] {start=%lld, size=%lld)\n",
282 		       nodes[0].start, nodes[0].size);
283 		return ret;
284 	}
285 
286 	nodes[1].size = 1024;
287 	nodes[1].start = 4096 - 512 - nodes[1].size;
288 	ret = drm_mm_reserve_node(&mm, &nodes[1]);
289 	if (ret) {
290 		pr_err("failed to reserve node[1] {start=%lld, size=%lld)\n",
291 		       nodes[1].start, nodes[1].size);
292 		return ret;
293 	}
294 
295 	show_mm(&mm);
296 	return 0;
297 }
298 
/* Fill in the requested placement on @node and hand it back for chaining. */
static struct drm_mm_node *set_node(struct drm_mm_node *node,
				    u64 start, u64 size)
{
	node->size = size;
	node->start = start;
	return node;
}
306 
expect_reserve_fail(struct drm_mm * mm,struct drm_mm_node * node)307 static bool expect_reserve_fail(struct drm_mm *mm, struct drm_mm_node *node)
308 {
309 	int err;
310 
311 	err = drm_mm_reserve_node(mm, node);
312 	if (likely(err == -ENOSPC))
313 		return true;
314 
315 	if (!err) {
316 		pr_err("impossible reserve succeeded, node %llu + %llu\n",
317 		       node->start, node->size);
318 		drm_mm_remove_node(node);
319 	} else {
320 		pr_err("impossible reserve failed with wrong error %d [expected %d], node %llu + %llu\n",
321 		       err, -ENOSPC, node->start, node->size);
322 	}
323 	return false;
324 }
325 
/*
 * Probe drm_mm_reserve_node() with a table of degenerate and out-of-range
 * placements (zero sizes, underflowed starts, spans crossing either end of
 * the [0, count*size) managed range); every single one must be rejected.
 */
static bool check_reserve_boundaries(struct drm_mm *mm,
				     unsigned int count,
				     u64 size)
{
	const struct boundary {
		u64 start, size;
		const char *name;
	} boundaries[] = {
/* B() also stringifies the pair so failures identify the offending entry. */
#define B(st, sz) { (st), (sz), "{ " #st ", " #sz "}" }
		B(0, 0),
		B(-size, 0),
		B(size, 0),
		B(size * count, 0),
		B(-size, size),
		B(-size, -size),
		B(-size, 2*size),
		B(0, -size),
		B(size, -size),
		B(count*size, size),
		B(count*size, -size),
		B(count*size, count*size),
		B(count*size, -count*size),
		B(count*size, -(count+1)*size),
		B((count+1)*size, size),
		B((count+1)*size, -size),
		B((count+1)*size, -2*size),
#undef B
	};
	struct drm_mm_node tmp = {};
	int n;

	for (n = 0; n < ARRAY_SIZE(boundaries); n++) {
		if (!expect_reserve_fail(mm,
					 set_node(&tmp,
						  boundaries[n].start,
						  boundaries[n].size))) {
			pr_err("boundary[%d:%s] failed, count=%u, size=%lld\n",
			       n, boundaries[n].name, count, size);
			return false;
		}
	}

	return true;
}
370 
/*
 * Stress drm_mm_reserve_node(): boundary reservations must fail, valid
 * reservations must succeed in random order, double-reserving and
 * overlapping reservations must fail, and remove/re-reserve cycles must
 * leave the address space fully populated and intact.
 *
 * Returns 0 on success or a negative error code.
 */
static int __igt_reserve(unsigned int count, u64 size)
{
	DRM_RND_STATE(prng, random_seed);
	struct drm_mm mm;
	struct drm_mm_node tmp, *nodes, *node, *next;
	unsigned int *order, n, m, o = 0;
	int ret, err;

	/* For exercising drm_mm_reserve_node(), we want to check that
	 * reservations outside of the drm_mm range are rejected, and to
	 * overlapping and otherwise already occupied ranges. Afterwards,
	 * the tree and nodes should be intact.
	 */

	DRM_MM_BUG_ON(!count);
	DRM_MM_BUG_ON(!size);

	ret = -ENOMEM;
	order = drm_random_order(count, &prng);
	if (!order)
		goto err;

	nodes = vzalloc(array_size(count, sizeof(*nodes)));
	if (!nodes)
		goto err_order;

	ret = -EINVAL;
	drm_mm_init(&mm, 0, count * size);

	if (!check_reserve_boundaries(&mm, count, size))
		goto out;

	/* Reserve every slot in a random order. */
	for (n = 0; n < count; n++) {
		nodes[n].start = order[n] * size;
		nodes[n].size = size;

		err = drm_mm_reserve_node(&mm, &nodes[n]);
		if (err) {
			pr_err("reserve failed, step %d, start %llu\n",
			       n, nodes[n].start);
			ret = err;
			goto out;
		}

		if (!drm_mm_node_allocated(&nodes[n])) {
			pr_err("reserved node not allocated! step %d, start %llu\n",
			       n, nodes[n].start);
			goto out;
		}

		/* Reserving the very same node twice must fail. */
		if (!expect_reserve_fail(&mm, &nodes[n]))
			goto out;
	}

	/* After random insertion the nodes should be in order */
	if (!assert_continuous(&mm, size))
		goto out;

	/* Repeated use should then fail */
	drm_random_reorder(order, count, &prng);
	for (n = 0; n < count; n++) {
		if (!expect_reserve_fail(&mm,
					 set_node(&tmp, order[n] * size, 1)))
			goto out;

		/* Remove and reinsert should work */
		drm_mm_remove_node(&nodes[order[n]]);
		err = drm_mm_reserve_node(&mm, &nodes[order[n]]);
		if (err) {
			/* NOTE(review): the node reserved is nodes[order[n]]
			 * but the message prints nodes[n].start - possibly
			 * the wrong node is reported; verify upstream.
			 */
			pr_err("reserve failed, step %d, start %llu\n",
			       n, nodes[n].start);
			ret = err;
			goto out;
		}
	}

	if (!assert_continuous(&mm, size))
		goto out;

	/* Overlapping use should then fail */
	for (n = 0; n < count; n++) {
		if (!expect_reserve_fail(&mm, set_node(&tmp, 0, size*count)))
			goto out;
	}
	for (n = 0; n < count; n++) {
		if (!expect_reserve_fail(&mm,
					 set_node(&tmp,
						  size * n,
						  size * (count - n))))
			goto out;
	}

	/* Remove several, reinsert, check full */
	for_each_prime_number(n, min(max_prime, count)) {
		for (m = 0; m < n; m++) {
			node = &nodes[order[(o + m) % count]];
			drm_mm_remove_node(node);
		}

		for (m = 0; m < n; m++) {
			node = &nodes[order[(o + m) % count]];
			err = drm_mm_reserve_node(&mm, node);
			if (err) {
				pr_err("reserve failed, step %d/%d, start %llu\n",
				       m, n, node->start);
				ret = err;
				goto out;
			}
		}

		o += n;

		if (!assert_continuous(&mm, size))
			goto out;
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	vfree(nodes);
err_order:
	kfree(order);
err:
	return ret;
}
498 
igt_reserve(void * ignored)499 static int igt_reserve(void *ignored)
500 {
501 	const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
502 	int n, ret;
503 
504 	for_each_prime_number_from(n, 1, 54) {
505 		u64 size = BIT_ULL(n);
506 
507 		ret = __igt_reserve(count, size - 1);
508 		if (ret)
509 			return ret;
510 
511 		ret = __igt_reserve(count, size);
512 		if (ret)
513 			return ret;
514 
515 		ret = __igt_reserve(count, size + 1);
516 		if (ret)
517 			return ret;
518 
519 		cond_resched();
520 	}
521 
522 	return 0;
523 }
524 
expect_insert(struct drm_mm * mm,struct drm_mm_node * node,u64 size,u64 alignment,unsigned long color,const struct insert_mode * mode)525 static bool expect_insert(struct drm_mm *mm, struct drm_mm_node *node,
526 			  u64 size, u64 alignment, unsigned long color,
527 			  const struct insert_mode *mode)
528 {
529 	int err;
530 
531 	err = drm_mm_insert_node_generic(mm, node,
532 					 size, alignment, color,
533 					 mode->mode);
534 	if (err) {
535 		pr_err("insert (size=%llu, alignment=%llu, color=%lu, mode=%s) failed with err=%d\n",
536 		       size, alignment, color, mode->name, err);
537 		return false;
538 	}
539 
540 	if (!assert_node(node, mm, size, alignment, color)) {
541 		drm_mm_remove_node(node);
542 		return false;
543 	}
544 
545 	return true;
546 }
547 
expect_insert_fail(struct drm_mm * mm,u64 size)548 static bool expect_insert_fail(struct drm_mm *mm, u64 size)
549 {
550 	struct drm_mm_node tmp = {};
551 	int err;
552 
553 	err = drm_mm_insert_node(mm, &tmp, size);
554 	if (likely(err == -ENOSPC))
555 		return true;
556 
557 	if (!err) {
558 		pr_err("impossible insert succeeded, node %llu + %llu\n",
559 		       tmp.start, tmp.size);
560 		drm_mm_remove_node(&tmp);
561 	} else {
562 		pr_err("impossible insert failed with wrong error %d [expected %d], size %llu\n",
563 		       err, -ENOSPC, size);
564 	}
565 	return false;
566 }
567 
/*
 * Fill the whole [0, count*size) range with @count nodes of @size under
 * every insertion mode, then exercise refill (remove one / reinsert) and
 * bulk remove/reinsert cycles, verifying the space stays packed. With
 * @replace, each insertion goes through a temporary node that is then
 * drm_mm_replace_node()d into place, checking the hand-over.
 *
 * Returns 0 on success or a negative error code.
 */
static int __igt_insert(unsigned int count, u64 size, bool replace)
{
	DRM_RND_STATE(prng, random_seed);
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct drm_mm_node *nodes, *node, *next;
	unsigned int *order, n, m, o = 0;
	int ret;

	/* Fill a range with lots of nodes, check it doesn't fail too early */

	DRM_MM_BUG_ON(!count);
	DRM_MM_BUG_ON(!size);

	ret = -ENOMEM;
	nodes = vmalloc(array_size(count, sizeof(*nodes)));
	if (!nodes)
		goto err;

	order = drm_random_order(count, &prng);
	if (!order)
		goto err_nodes;

	ret = -EINVAL;
	drm_mm_init(&mm, 0, count * size);

	for (mode = insert_modes; mode->name; mode++) {
		for (n = 0; n < count; n++) {
			struct drm_mm_node tmp;

			/* In replace mode, insert via tmp then hand over. */
			node = replace ? &tmp : &nodes[n];
			memset(node, 0, sizeof(*node));
			if (!expect_insert(&mm, node, size, 0, n, mode)) {
				pr_err("%s insert failed, size %llu step %d\n",
				       mode->name, size, n);
				goto out;
			}

			if (replace) {
				drm_mm_replace_node(&tmp, &nodes[n]);
				if (drm_mm_node_allocated(&tmp)) {
					pr_err("replaced old-node still allocated! step %d\n",
					       n);
					goto out;
				}

				if (!assert_node(&nodes[n], &mm, size, 0, n)) {
					pr_err("replaced node did not inherit parameters, size %llu step %d\n",
					       size, n);
					goto out;
				}

				if (tmp.start != nodes[n].start) {
					pr_err("replaced node mismatch location expected [%llx + %llx], found [%llx + %llx]\n",
					       tmp.start, size,
					       nodes[n].start, nodes[n].size);
					goto out;
				}
			}
		}

		/* After random insertion the nodes should be in order */
		if (!assert_continuous(&mm, size))
			goto out;

		/* Repeated use should then fail */
		if (!expect_insert_fail(&mm, size))
			goto out;

		/* Remove one and reinsert, as the only hole it should refill itself */
		for (n = 0; n < count; n++) {
			u64 addr = nodes[n].start;

			drm_mm_remove_node(&nodes[n]);
			if (!expect_insert(&mm, &nodes[n], size, 0, n, mode)) {
				pr_err("%s reinsert failed, size %llu step %d\n",
				       mode->name, size, n);
				goto out;
			}

			if (nodes[n].start != addr) {
				pr_err("%s reinsert node moved, step %d, expected %llx, found %llx\n",
				       mode->name, n, addr, nodes[n].start);
				goto out;
			}

			if (!assert_continuous(&mm, size))
				goto out;
		}

		/* Remove several, reinsert, check full */
		for_each_prime_number(n, min(max_prime, count)) {
			for (m = 0; m < n; m++) {
				node = &nodes[order[(o + m) % count]];
				drm_mm_remove_node(node);
			}

			for (m = 0; m < n; m++) {
				node = &nodes[order[(o + m) % count]];
				if (!expect_insert(&mm, node, size, 0, n, mode)) {
					pr_err("%s multiple reinsert failed, size %llu step %d\n",
					       mode->name, size, n);
					goto out;
				}
			}

			o += n;

			if (!assert_continuous(&mm, size))
				goto out;

			if (!expect_insert_fail(&mm, size))
				goto out;
		}

		/* Start each mode from an empty mm. */
		drm_mm_for_each_node_safe(node, next, &mm)
			drm_mm_remove_node(node);
		DRM_MM_BUG_ON(!drm_mm_clean(&mm));

		cond_resched();
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_nodes:
	vfree(nodes);
err:
	return ret;
}
701 
igt_insert(void * ignored)702 static int igt_insert(void *ignored)
703 {
704 	const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
705 	unsigned int n;
706 	int ret;
707 
708 	for_each_prime_number_from(n, 1, 54) {
709 		u64 size = BIT_ULL(n);
710 
711 		ret = __igt_insert(count, size - 1, false);
712 		if (ret)
713 			return ret;
714 
715 		ret = __igt_insert(count, size, false);
716 		if (ret)
717 			return ret;
718 
719 		ret = __igt_insert(count, size + 1, false);
720 		if (ret)
721 			return ret;
722 
723 		cond_resched();
724 	}
725 
726 	return 0;
727 }
728 
igt_replace(void * ignored)729 static int igt_replace(void *ignored)
730 {
731 	const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
732 	unsigned int n;
733 	int ret;
734 
735 	/* Reuse igt_insert to exercise replacement by inserting a dummy node,
736 	 * then replacing it with the intended node. We want to check that
737 	 * the tree is intact and all the information we need is carried
738 	 * across to the target node.
739 	 */
740 
741 	for_each_prime_number_from(n, 1, 54) {
742 		u64 size = BIT_ULL(n);
743 
744 		ret = __igt_insert(count, size - 1, true);
745 		if (ret)
746 			return ret;
747 
748 		ret = __igt_insert(count, size, true);
749 		if (ret)
750 			return ret;
751 
752 		ret = __igt_insert(count, size + 1, true);
753 		if (ret)
754 			return ret;
755 
756 		cond_resched();
757 	}
758 
759 	return 0;
760 }
761 
expect_insert_in_range(struct drm_mm * mm,struct drm_mm_node * node,u64 size,u64 alignment,unsigned long color,u64 range_start,u64 range_end,const struct insert_mode * mode)762 static bool expect_insert_in_range(struct drm_mm *mm, struct drm_mm_node *node,
763 				   u64 size, u64 alignment, unsigned long color,
764 				   u64 range_start, u64 range_end,
765 				   const struct insert_mode *mode)
766 {
767 	int err;
768 
769 	err = drm_mm_insert_node_in_range(mm, node,
770 					  size, alignment, color,
771 					  range_start, range_end,
772 					  mode->mode);
773 	if (err) {
774 		pr_err("insert (size=%llu, alignment=%llu, color=%lu, mode=%s) nto range [%llx, %llx] failed with err=%d\n",
775 		       size, alignment, color, mode->name,
776 		       range_start, range_end, err);
777 		return false;
778 	}
779 
780 	if (!assert_node(node, mm, size, alignment, color)) {
781 		drm_mm_remove_node(node);
782 		return false;
783 	}
784 
785 	return true;
786 }
787 
expect_insert_in_range_fail(struct drm_mm * mm,u64 size,u64 range_start,u64 range_end)788 static bool expect_insert_in_range_fail(struct drm_mm *mm,
789 					u64 size,
790 					u64 range_start,
791 					u64 range_end)
792 {
793 	struct drm_mm_node tmp = {};
794 	int err;
795 
796 	err = drm_mm_insert_node_in_range(mm, &tmp,
797 					  size, 0, 0,
798 					  range_start, range_end,
799 					  0);
800 	if (likely(err == -ENOSPC))
801 		return true;
802 
803 	if (!err) {
804 		pr_err("impossible insert succeeded, node %llx + %llu, range [%llx, %llx]\n",
805 		       tmp.start, tmp.size, range_start, range_end);
806 		drm_mm_remove_node(&tmp);
807 	} else {
808 		pr_err("impossible insert failed with wrong error %d [expected %d], size %llu, range [%llx, %llx]\n",
809 		       err, -ENOSPC, size, range_start, range_end);
810 	}
811 
812 	return false;
813 }
814 
/*
 * Verify that the sub-range [@start, @end) of @mm is fully packed with
 * @size-sized nodes: no further insertion fits, nodes are placed at
 * consecutive multiples of @size, no internal holes remain, and nothing
 * is allocated outside the range.
 */
static bool assert_contiguous_in_range(struct drm_mm *mm,
				       u64 size,
				       u64 start,
				       u64 end)
{
	struct drm_mm_node *node;
	unsigned int n;

	if (!expect_insert_in_range_fail(mm, size, start, end))
		return false;

	/* Index of the first slot at or above @start. */
	n = div64_u64(start + size - 1, size);
	drm_mm_for_each_node(node, mm) {
		if (node->start < start || node->start + node->size > end) {
			pr_err("node %d out of range, address [%llx + %llu], range [%llx, %llx]\n",
			       n, node->start, node->start + node->size, start, end);
			return false;
		}

		if (node->start != n * size) {
			pr_err("node %d out of order, expected start %llx, found %llx\n",
			       n, n * size, node->start);
			return false;
		}

		if (node->size != size) {
			pr_err("node %d has wrong size, expected size %llx, found %llx\n",
			       n, size, node->size);
			return false;
		}

		/* A trailing hole is only allowed past the end of the range. */
		if (drm_mm_hole_follows(node) &&
		    drm_mm_hole_node_end(node) < end) {
			pr_err("node %d is followed by a hole!\n", n);
			return false;
		}

		n++;
	}

	/* Nothing may be allocated below @start... */
	if (start > 0) {
		node = __drm_mm_interval_first(mm, 0, start - 1);
		if (node->allocated) {
			pr_err("node before start: node=%llx+%llu, start=%llx\n",
			       node->start, node->size, start);
			return false;
		}
	}

	/* ...nor at or above @end. */
	if (end < U64_MAX) {
		node = __drm_mm_interval_first(mm, end, U64_MAX);
		if (node->allocated) {
			pr_err("node after end: node=%llx+%llu, end=%llx\n",
			       node->start, node->size, end);
			return false;
		}
	}

	return true;
}
875 
/*
 * Like __igt_insert(), but restrict insertions to the sub-range
 * [@start, @end) of a larger mm, checking that the sub-range fills
 * completely and that remove/reinsert refills the same slot.
 *
 * Returns 0 on success or a negative error code.
 */
static int __igt_insert_range(unsigned int count, u64 size, u64 start, u64 end)
{
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct drm_mm_node *nodes, *node, *next;
	unsigned int n, start_n, end_n;
	int ret;

	DRM_MM_BUG_ON(!count);
	DRM_MM_BUG_ON(!size);
	DRM_MM_BUG_ON(end <= start);

	/* Very similar to __igt_insert(), but now instead of populating the
	 * full range of the drm_mm, we try to fill a small portion of it.
	 */

	ret = -ENOMEM;
	nodes = vzalloc(array_size(count, sizeof(*nodes)));
	if (!nodes)
		goto err;

	ret = -EINVAL;
	drm_mm_init(&mm, 0, count * size);

	/* First and last size-aligned slot that fit inside [start, end). */
	start_n = div64_u64(start + size - 1, size);
	end_n = div64_u64(end - size, size);

	for (mode = insert_modes; mode->name; mode++) {
		for (n = start_n; n <= end_n; n++) {
			if (!expect_insert_in_range(&mm, &nodes[n],
						    size, size, n,
						    start, end, mode)) {
				pr_err("%s insert failed, size %llu, step %d [%d, %d], range [%llx, %llx]\n",
				       mode->name, size, n,
				       start_n, end_n,
				       start, end);
				goto out;
			}
		}

		if (!assert_contiguous_in_range(&mm, size, start, end)) {
			pr_err("%s: range [%llx, %llx] not full after initialisation, size=%llu\n",
			       mode->name, start, end, size);
			goto out;
		}

		/* Remove one and reinsert, it should refill itself */
		for (n = start_n; n <= end_n; n++) {
			u64 addr = nodes[n].start;

			drm_mm_remove_node(&nodes[n]);
			if (!expect_insert_in_range(&mm, &nodes[n],
						    size, size, n,
						    start, end, mode)) {
				pr_err("%s reinsert failed, step %d\n", mode->name, n);
				goto out;
			}

			if (nodes[n].start != addr) {
				pr_err("%s reinsert node moved, step %d, expected %llx, found %llx\n",
				       mode->name, n, addr, nodes[n].start);
				goto out;
			}
		}

		if (!assert_contiguous_in_range(&mm, size, start, end)) {
			pr_err("%s: range [%llx, %llx] not full after reinsertion, size=%llu\n",
			       mode->name, start, end, size);
			goto out;
		}

		/* Start each mode from an empty mm. */
		drm_mm_for_each_node_safe(node, next, &mm)
			drm_mm_remove_node(node);
		DRM_MM_BUG_ON(!drm_mm_clean(&mm));

		cond_resched();
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	vfree(nodes);
err:
	return ret;
}
963 
insert_outside_range(void)964 static int insert_outside_range(void)
965 {
966 	struct drm_mm mm;
967 	const unsigned int start = 1024;
968 	const unsigned int end = 2048;
969 	const unsigned int size = end - start;
970 
971 	drm_mm_init(&mm, start, size);
972 
973 	if (!expect_insert_in_range_fail(&mm, 1, 0, start))
974 		return -EINVAL;
975 
976 	if (!expect_insert_in_range_fail(&mm, size,
977 					 start - size/2, start + (size+1)/2))
978 		return -EINVAL;
979 
980 	if (!expect_insert_in_range_fail(&mm, size,
981 					 end - (size+1)/2, end + size/2))
982 		return -EINVAL;
983 
984 	if (!expect_insert_in_range_fail(&mm, 1, end, end + size))
985 		return -EINVAL;
986 
987 	drm_mm_takedown(&mm);
988 	return 0;
989 }
990 
igt_insert_range(void * ignored)991 static int igt_insert_range(void *ignored)
992 {
993 	const unsigned int count = min_t(unsigned int, BIT(13), max_iterations);
994 	unsigned int n;
995 	int ret;
996 
997 	/* Check that requests outside the bounds of drm_mm are rejected. */
998 	ret = insert_outside_range();
999 	if (ret)
1000 		return ret;
1001 
1002 	for_each_prime_number_from(n, 1, 50) {
1003 		const u64 size = BIT_ULL(n);
1004 		const u64 max = count * size;
1005 
1006 		ret = __igt_insert_range(count, size, 0, max);
1007 		if (ret)
1008 			return ret;
1009 
1010 		ret = __igt_insert_range(count, size, 1, max);
1011 		if (ret)
1012 			return ret;
1013 
1014 		ret = __igt_insert_range(count, size, 0, max - 1);
1015 		if (ret)
1016 			return ret;
1017 
1018 		ret = __igt_insert_range(count, size, 0, max/2);
1019 		if (ret)
1020 			return ret;
1021 
1022 		ret = __igt_insert_range(count, size, max/2, max);
1023 		if (ret)
1024 			return ret;
1025 
1026 		ret = __igt_insert_range(count, size, max/4+1, 3*max/4-1);
1027 		if (ret)
1028 			return ret;
1029 
1030 		cond_resched();
1031 	}
1032 
1033 	return 0;
1034 }
1035 
/*
 * For each insertion mode, insert nodes of prime sizes with prime
 * alignments and check via expect_insert()/assert_node() that each node
 * honours its alignment.
 */
static int igt_align(void *ignored)
{
	const struct insert_mode *mode;
	const unsigned int max_count = min(8192u, max_prime);
	struct drm_mm mm;
	struct drm_mm_node *nodes, *node, *next;
	unsigned int prime;
	int ret = -EINVAL;

	/* For each of the possible insertion modes, we pick a few
	 * arbitrary alignments and check that the inserted node
	 * meets our requirements.
	 */

	nodes = vzalloc(array_size(max_count, sizeof(*nodes)));
	if (!nodes)
		goto err;

	drm_mm_init(&mm, 1, U64_MAX - 2);

	for (mode = insert_modes; mode->name; mode++) {
		unsigned int i = 0;

		for_each_prime_number_from(prime, 1, max_count) {
			/* Size is the next prime, so size and alignment are coprime. */
			u64 size = next_prime_number(prime);

			if (!expect_insert(&mm, &nodes[i],
					   size, prime, i,
					   mode)) {
				pr_err("%s insert failed with alignment=%d",
				       mode->name, prime);
				goto out;
			}

			i++;
		}

		drm_mm_for_each_node_safe(node, next, &mm)
			drm_mm_remove_node(node);
		DRM_MM_BUG_ON(!drm_mm_clean(&mm));

		cond_resched();
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	vfree(nodes);
err:
	return ret;
}
1089 
/*
 * Check power-of-two alignments up to 2^(max-1), i.e. across the full
 * address space for max == 64; each node is sized just over half its
 * alignment so successive insertions cannot share an aligned slot.
 */
static int igt_align_pot(int max)
{
	struct drm_mm mm;
	struct drm_mm_node *node, *next;
	int bit;
	int ret = -EINVAL;

	/* Check that we can align to the full u64 address space */

	drm_mm_init(&mm, 1, U64_MAX - 2);

	for (bit = max - 1; bit; bit--) {
		u64 align, size;

		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node) {
			ret = -ENOMEM;
			goto out;
		}

		align = BIT_ULL(bit);
		size = BIT_ULL(bit-1) + 1;
		if (!expect_insert(&mm, node,
				   size, align, bit,
				   &insert_modes[0])) {
			pr_err("insert failed with alignment=%llx [%d]",
			       align, bit);
			goto out;
		}

		cond_resched();
	}

	ret = 0;
out:
	/* Nodes are individually kzalloc'ed; free them along with removal. */
	drm_mm_for_each_node_safe(node, next, &mm) {
		drm_mm_remove_node(node);
		kfree(node);
	}
	drm_mm_takedown(&mm);
	return ret;
}
1132 
/* Power-of-two alignment checks over a 32-bit address width. */
static int igt_align32(void *ignored)
{
	return igt_align_pot(32);
}
1137 
/* Power-of-two alignment checks over the full 64-bit address width. */
static int igt_align64(void *ignored)
{
	return igt_align_pot(64);
}
1142 
/* Log the parameters and current hit window of an eviction scan. */
static void show_scan(const struct drm_mm_scan *scan)
{
	pr_info("scan: hit [%llx, %llx], size=%lld, align=%lld, color=%ld\n",
		scan->hit_start, scan->hit_end,
		scan->size, scan->alignment, scan->color);
}
1149 
/*
 * Log up to @count holes of @mm, each with its (optional) allocated
 * neighbours on either side for context.
 */
static void show_holes(const struct drm_mm *mm, int count)
{
	u64 hole_start, hole_end;
	struct drm_mm_node *hole;

	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
		struct drm_mm_node *next = list_next_entry(hole, node_list);
		const char *node1 = NULL, *node2 = NULL;

		/* Describe the node preceding the hole, if allocated. */
		if (hole->allocated)
			node1 = kasprintf(GFP_KERNEL,
					  "[%llx + %lld, color=%ld], ",
					  hole->start, hole->size, hole->color);

		/* And the node following it. */
		if (next->allocated)
			node2 = kasprintf(GFP_KERNEL,
					  ", [%llx + %lld, color=%ld]",
					  next->start, next->size, next->color);

		pr_info("%sHole [%llx - %llx, size %lld]%s\n",
			node1,
			hole_start, hole_end, hole_end - hole_start,
			node2);

		/* kfree(NULL) is a no-op, so unallocated neighbours are fine. */
		kfree(node2);
		kfree(node1);

		if (!--count)
			break;
	}
}
1181 
/* A drm_mm_node plus a list link so eviction tests can track victims. */
struct evict_node {
	struct drm_mm_node node;	/* the node inserted into the drm_mm */
	struct list_head link;		/* entry on a local evict_list */
};
1186 
/*
 * evict_nodes - run a prepared scan over @nodes and evict the selected set.
 *
 * Feeds nodes (optionally in the shuffled @order) into @scan until the
 * scanner reports a fit, then backs the non-selected ones out again,
 * leaving only the chosen victims on @evict_list. The victims are removed
 * from the drm_mm. With @use_color, any additional nodes the scanner wants
 * evicted for color adjustment are removed and appended to @evict_list;
 * without it, drm_mm_scan_color_evict() reporting anything is an error.
 * Returns false if no eviction set could be found or the color
 * bookkeeping was wrong.
 */
static bool evict_nodes(struct drm_mm_scan *scan,
			struct evict_node *nodes,
			unsigned int *order,
			unsigned int count,
			bool use_color,
			struct list_head *evict_list)
{
	struct evict_node *e, *en;
	unsigned int i;

	for (i = 0; i < count; i++) {
		e = &nodes[order ? order[i] : i];
		list_add(&e->link, evict_list);
		if (drm_mm_scan_add_block(scan, &e->node))
			break;
	}
	/* Remove-block must be called in reverse for every added node;
	 * nodes it rejects are not part of the eviction set, so unlink them.
	 */
	list_for_each_entry_safe(e, en, evict_list, link) {
		if (!drm_mm_scan_remove_block(scan, &e->node))
			list_del(&e->link);
	}
	if (list_empty(evict_list)) {
		pr_err("Failed to find eviction: size=%lld [avail=%d], align=%lld (color=%lu)\n",
		       scan->size, count, scan->alignment, scan->color);
		return false;
	}

	list_for_each_entry(e, evict_list, link)
		drm_mm_remove_node(&e->node);

	if (use_color) {
		struct drm_mm_node *node;

		/* Collect the extra victims needed to satisfy color_adjust. */
		while ((node = drm_mm_scan_color_evict(scan))) {
			e = container_of(node, typeof(*e), node);
			drm_mm_remove_node(&e->node);
			list_add(&e->link, evict_list);
		}
	} else {
		if (drm_mm_scan_color_evict(scan)) {
			pr_err("drm_mm_scan_color_evict unexpectedly reported overlapping nodes!\n");
			return false;
		}
	}

	return true;
}
1233 
/*
 * evict_nothing - abort a scan over every node and check the mm is intact.
 *
 * Adds all @total_size nodes to a scan and then removes every one again
 * (an aborted scan must be a no-op), then verifies each node is still
 * allocated, still reachable by walking the mm's node list, and that the
 * address space remains fully continuous.
 */
static bool evict_nothing(struct drm_mm *mm,
			  unsigned int total_size,
			  struct evict_node *nodes)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	struct drm_mm_node *node;
	unsigned int n;

	drm_mm_scan_init(&scan, mm, 1, 0, 0, 0);
	for (n = 0; n < total_size; n++) {
		e = &nodes[n];
		list_add(&e->link, &evict_list);
		drm_mm_scan_add_block(&scan, &e->node);
	}
	list_for_each_entry(e, &evict_list, link)
		drm_mm_scan_remove_block(&scan, &e->node);

	for (n = 0; n < total_size; n++) {
		e = &nodes[n];

		if (!drm_mm_node_allocated(&e->node)) {
			pr_err("node[%d] no longer allocated!\n", n);
			return false;
		}

		/* Clear a per-node marker; it is set again below for every
		 * node still reachable via the mm's node list.
		 */
		e->link.next = NULL;
	}

	drm_mm_for_each_node(node, mm) {
		e = container_of(node, typeof(*e), node);
		e->link.next = &e->link;
	}

	/* Any node whose marker is still NULL fell off the mm's list. */
	for (n = 0; n < total_size; n++) {
		e = &nodes[n];

		if (!e->link.next) {
			pr_err("node[%d] no longer connected!\n", n);
			return false;
		}
	}

	return assert_continuous(mm, nodes[0].node.size);
}
1280 
/*
 * evict_everything - evict all nodes in one scan, then restore them.
 *
 * Scans for a hole the size of the whole mm, so every node must be marked
 * for eviction. Removes them all, checks exactly one hole spanning the
 * full range remains, then reserves each node back at its old offset and
 * verifies the space is continuous again.
 */
static bool evict_everything(struct drm_mm *mm,
			     unsigned int total_size,
			     struct evict_node *nodes)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	unsigned int n;
	int err;

	drm_mm_scan_init(&scan, mm, total_size, 0, 0, 0);
	for (n = 0; n < total_size; n++) {
		e = &nodes[n];
		list_add(&e->link, &evict_list);
		if (drm_mm_scan_add_block(&scan, &e->node))
			break;
	}

	/* Every node should be flagged for eviction; report only the first
	 * miss so we don't spam the log with 8k failures.
	 */
	err = 0;
	list_for_each_entry(e, &evict_list, link) {
		if (!drm_mm_scan_remove_block(&scan, &e->node)) {
			if (!err) {
				pr_err("Node %lld not marked for eviction!\n",
				       e->node.start);
				err = -EINVAL;
			}
		}
	}
	if (err)
		return false;

	list_for_each_entry(e, &evict_list, link)
		drm_mm_remove_node(&e->node);

	if (!assert_one_hole(mm, 0, total_size))
		return false;

	list_for_each_entry(e, &evict_list, link) {
		err = drm_mm_reserve_node(mm, &e->node);
		if (err) {
			pr_err("Failed to reinsert node after eviction: start=%llx\n",
			       e->node.start);
			return false;
		}
	}

	return assert_continuous(mm, nodes[0].node.size);
}
1329 
/*
 * evict_something - evict a hole for one allocation, verify it, undo it.
 *
 * Runs a ranged scan with the given size/alignment, evicts the chosen
 * victims and inserts a temporary node into the freed hole with
 * DRM_MM_INSERT_EVICT. Checks the node landed inside [range_start,
 * range_end), exactly filled the hole (right size and alignment, no
 * trailing hole), then removes it and reserves all victims back at their
 * original offsets, asserting the mm is continuous afterwards.
 * Returns 0 or a negative error code.
 */
static int evict_something(struct drm_mm *mm,
			   u64 range_start, u64 range_end,
			   struct evict_node *nodes,
			   unsigned int *order,
			   unsigned int count,
			   unsigned int size,
			   unsigned int alignment,
			   const struct insert_mode *mode)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	struct drm_mm_node tmp;
	int err;

	drm_mm_scan_init_with_range(&scan, mm,
				    size, alignment, 0,
				    range_start, range_end,
				    mode->mode);
	if (!evict_nodes(&scan,
			 nodes, order, count, false,
			 &evict_list))
		return -EINVAL;

	memset(&tmp, 0, sizeof(tmp));
	err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, 0,
					 DRM_MM_INSERT_EVICT);
	if (err) {
		pr_err("Failed to insert into eviction hole: size=%d, align=%d\n",
		       size, alignment);
		show_scan(&scan);
		show_holes(mm, 3);
		return err;
	}

	if (tmp.start < range_start || tmp.start + tmp.size > range_end) {
		pr_err("Inserted [address=%llu + %llu] did not fit into the request range [%llu, %llu]\n",
		       tmp.start, tmp.size, range_start, range_end);
		err = -EINVAL;
	}

	/* The eviction hole was built exactly to size: any trailing hole
	 * means the scanner evicted more than necessary.
	 */
	if (!assert_node(&tmp, mm, size, alignment, 0) ||
	    drm_mm_hole_follows(&tmp)) {
		pr_err("Inserted did not fill the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx, hole-follows?=%d\n",
		       tmp.size, size,
		       alignment, misalignment(&tmp, alignment),
		       tmp.start, drm_mm_hole_follows(&tmp));
		err = -EINVAL;
	}

	drm_mm_remove_node(&tmp);
	if (err)
		return err;

	/* Put the evicted nodes back so the next pass starts from a full mm. */
	list_for_each_entry(e, &evict_list, link) {
		err = drm_mm_reserve_node(mm, &e->node);
		if (err) {
			pr_err("Failed to reinsert node after eviction: start=%llx\n",
			       e->node.start);
			return err;
		}
	}

	if (!assert_continuous(mm, nodes[0].node.size)) {
		pr_err("range is no longer continuous\n");
		return -EINVAL;
	}

	return 0;
}
1400 
/*
 * igt_evict - stress eviction scanning over the whole address range.
 *
 * Fills the mm completely with unit-sized nodes, sanity-checks an aborted
 * scan (evict_nothing) and a full-range scan (evict_everything), then for
 * each eviction mode runs evict_something() with power-of-two sizes,
 * power-of-two alignments and prime alignments, shuffling the order in
 * which nodes are offered to the scanner each time.
 */
static int igt_evict(void *ignored)
{
	DRM_RND_STATE(prng, random_seed);
	const unsigned int size = 8192;
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct evict_node *nodes;
	struct drm_mm_node *node, *next;
	unsigned int *order, n;
	int ret, err;

	/* Here we populate a full drm_mm and then try and insert a new node
	 * by evicting other nodes in a random order. The drm_mm_scan should
	 * pick the first matching hole it finds from the random list. We
	 * repeat that for different allocation strategies, alignments and
	 * sizes to try and stress the hole finder.
	 */

	ret = -ENOMEM;
	nodes = vzalloc(array_size(size, sizeof(*nodes)));
	if (!nodes)
		goto err;

	order = drm_random_order(size, &prng);
	if (!order)
		goto err_nodes;

	ret = -EINVAL;
	drm_mm_init(&mm, 0, size);
	for (n = 0; n < size; n++) {
		err = drm_mm_insert_node(&mm, &nodes[n].node, 1);
		if (err) {
			pr_err("insert failed, step %d\n", n);
			ret = err;
			goto out;
		}
	}

	/* First check that using the scanner doesn't break the mm */
	if (!evict_nothing(&mm, size, nodes)) {
		pr_err("evict_nothing() failed\n");
		goto out;
	}
	if (!evict_everything(&mm, size, nodes)) {
		pr_err("evict_everything() failed\n");
		goto out;
	}

	for (mode = evict_modes; mode->name; mode++) {
		/* Power-of-two sizes, unit alignment. */
		for (n = 1; n <= size; n <<= 1) {
			drm_random_reorder(order, size, &prng);
			err = evict_something(&mm, 0, U64_MAX,
					      nodes, order, size,
					      n, 1,
					      mode);
			if (err) {
				pr_err("%s evict_something(size=%u) failed\n",
				       mode->name, n);
				ret = err;
				goto out;
			}
		}

		/* Fixed size, power-of-two alignments. */
		for (n = 1; n < size; n <<= 1) {
			drm_random_reorder(order, size, &prng);
			err = evict_something(&mm, 0, U64_MAX,
					      nodes, order, size,
					      size/2, n,
					      mode);
			if (err) {
				pr_err("%s evict_something(size=%u, alignment=%u) failed\n",
				       mode->name, size/2, n);
				ret = err;
				goto out;
			}
		}

		/* Prime alignments with sizes shrunk to keep them feasible. */
		for_each_prime_number_from(n, 1, min(size, max_prime)) {
			unsigned int nsize = (size - n + 1) / 2;

			DRM_MM_BUG_ON(!nsize);

			drm_random_reorder(order, size, &prng);
			err = evict_something(&mm, 0, U64_MAX,
					      nodes, order, size,
					      nsize, n,
					      mode);
			if (err) {
				pr_err("%s evict_something(size=%u, alignment=%u) failed\n",
				       mode->name, nsize, n);
				ret = err;
				goto out;
			}
		}

		cond_resched();
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_nodes:
	vfree(nodes);
err:
	return ret;
}
1510 
/*
 * igt_evict_range - like igt_evict(), but restricted to a sub-range.
 *
 * Fills the mm, then runs evict_something() against the middle half of
 * the address space only, checking the scanner honours the range limits
 * for power-of-two sizes, power-of-two alignments and prime alignments.
 */
static int igt_evict_range(void *ignored)
{
	DRM_RND_STATE(prng, random_seed);
	const unsigned int size = 8192;
	const unsigned int range_size = size / 2;
	const unsigned int range_start = size / 4;
	const unsigned int range_end = range_start + range_size;
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct evict_node *nodes;
	struct drm_mm_node *node, *next;
	unsigned int *order, n;
	int ret, err;

	/* Like igt_evict() but now we are limiting the search to a
	 * small portion of the full drm_mm.
	 */

	ret = -ENOMEM;
	nodes = vzalloc(array_size(size, sizeof(*nodes)));
	if (!nodes)
		goto err;

	order = drm_random_order(size, &prng);
	if (!order)
		goto err_nodes;

	ret = -EINVAL;
	drm_mm_init(&mm, 0, size);
	for (n = 0; n < size; n++) {
		err = drm_mm_insert_node(&mm, &nodes[n].node, 1);
		if (err) {
			pr_err("insert failed, step %d\n", n);
			ret = err;
			goto out;
		}
	}

	for (mode = evict_modes; mode->name; mode++) {
		for (n = 1; n <= range_size; n <<= 1) {
			drm_random_reorder(order, size, &prng);
			err = evict_something(&mm, range_start, range_end,
					      nodes, order, size,
					      n, 1,
					      mode);
			if (err) {
				pr_err("%s evict_something(size=%u) failed with range [%u, %u]\n",
				       mode->name, n, range_start, range_end);
				goto out;
			}
		}

		for (n = 1; n <= range_size; n <<= 1) {
			drm_random_reorder(order, size, &prng);
			err = evict_something(&mm, range_start, range_end,
					      nodes, order, size,
					      range_size/2, n,
					      mode);
			if (err) {
				pr_err("%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n",
				       mode->name, range_size/2, n, range_start, range_end);
				goto out;
			}
		}

		for_each_prime_number_from(n, 1, min(range_size, max_prime)) {
			unsigned int nsize = (range_size - n + 1) / 2;

			DRM_MM_BUG_ON(!nsize);

			drm_random_reorder(order, size, &prng);
			err = evict_something(&mm, range_start, range_end,
					      nodes, order, size,
					      nsize, n,
					      mode);
			if (err) {
				pr_err("%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n",
				       mode->name, nsize, n, range_start, range_end);
				goto out;
			}
		}

		cond_resched();
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_nodes:
	vfree(nodes);
err:
	return ret;
}
1607 
/* Map a node to its slot index; only valid while all nodes share one size. */
static unsigned int node_index(const struct drm_mm_node *node)
{
	return div64_u64(node->start, node->size);
}
1612 
igt_topdown(void * ignored)1613 static int igt_topdown(void *ignored)
1614 {
1615 	const struct insert_mode *topdown = &insert_modes[TOPDOWN];
1616 	DRM_RND_STATE(prng, random_seed);
1617 	const unsigned int count = 8192;
1618 	unsigned int size;
1619 	unsigned long *bitmap;
1620 	struct drm_mm mm;
1621 	struct drm_mm_node *nodes, *node, *next;
1622 	unsigned int *order, n, m, o = 0;
1623 	int ret;
1624 
1625 	/* When allocating top-down, we expect to be returned a node
1626 	 * from a suitable hole at the top of the drm_mm. We check that
1627 	 * the returned node does match the highest available slot.
1628 	 */
1629 
1630 	ret = -ENOMEM;
1631 	nodes = vzalloc(array_size(count, sizeof(*nodes)));
1632 	if (!nodes)
1633 		goto err;
1634 
1635 	bitmap = bitmap_zalloc(count, GFP_KERNEL);
1636 	if (!bitmap)
1637 		goto err_nodes;
1638 
1639 	order = drm_random_order(count, &prng);
1640 	if (!order)
1641 		goto err_bitmap;
1642 
1643 	ret = -EINVAL;
1644 	for (size = 1; size <= 64; size <<= 1) {
1645 		drm_mm_init(&mm, 0, size*count);
1646 		for (n = 0; n < count; n++) {
1647 			if (!expect_insert(&mm, &nodes[n],
1648 					   size, 0, n,
1649 					   topdown)) {
1650 				pr_err("insert failed, size %u step %d\n", size, n);
1651 				goto out;
1652 			}
1653 
1654 			if (drm_mm_hole_follows(&nodes[n])) {
1655 				pr_err("hole after topdown insert %d, start=%llx\n, size=%u",
1656 				       n, nodes[n].start, size);
1657 				goto out;
1658 			}
1659 
1660 			if (!assert_one_hole(&mm, 0, size*(count - n - 1)))
1661 				goto out;
1662 		}
1663 
1664 		if (!assert_continuous(&mm, size))
1665 			goto out;
1666 
1667 		drm_random_reorder(order, count, &prng);
1668 		for_each_prime_number_from(n, 1, min(count, max_prime)) {
1669 			for (m = 0; m < n; m++) {
1670 				node = &nodes[order[(o + m) % count]];
1671 				drm_mm_remove_node(node);
1672 				__set_bit(node_index(node), bitmap);
1673 			}
1674 
1675 			for (m = 0; m < n; m++) {
1676 				unsigned int last;
1677 
1678 				node = &nodes[order[(o + m) % count]];
1679 				if (!expect_insert(&mm, node,
1680 						   size, 0, 0,
1681 						   topdown)) {
1682 					pr_err("insert failed, step %d/%d\n", m, n);
1683 					goto out;
1684 				}
1685 
1686 				if (drm_mm_hole_follows(node)) {
1687 					pr_err("hole after topdown insert %d/%d, start=%llx\n",
1688 					       m, n, node->start);
1689 					goto out;
1690 				}
1691 
1692 				last = find_last_bit(bitmap, count);
1693 				if (node_index(node) != last) {
1694 					pr_err("node %d/%d, size %d, not inserted into upmost hole, expected %d, found %d\n",
1695 					       m, n, size, last, node_index(node));
1696 					goto out;
1697 				}
1698 
1699 				__clear_bit(last, bitmap);
1700 			}
1701 
1702 			DRM_MM_BUG_ON(find_first_bit(bitmap, count) != count);
1703 
1704 			o += n;
1705 		}
1706 
1707 		drm_mm_for_each_node_safe(node, next, &mm)
1708 			drm_mm_remove_node(node);
1709 		DRM_MM_BUG_ON(!drm_mm_clean(&mm));
1710 		cond_resched();
1711 	}
1712 
1713 	ret = 0;
1714 out:
1715 	drm_mm_for_each_node_safe(node, next, &mm)
1716 		drm_mm_remove_node(node);
1717 	drm_mm_takedown(&mm);
1718 	kfree(order);
1719 err_bitmap:
1720 	bitmap_free(bitmap);
1721 err_nodes:
1722 	vfree(nodes);
1723 err:
1724 	return ret;
1725 }
1726 
/*
 * igt_bottomup - check DRM_MM_INSERT_LOW picks the lowest suitable hole.
 *
 * Mirror of igt_topdown: fills the mm bottom-up, punches random holes
 * (recorded in a bitmap) and reinserts bottom-up, asserting each node
 * lands in the first (lowest) free slot.
 */
static int igt_bottomup(void *ignored)
{
	const struct insert_mode *bottomup = &insert_modes[BOTTOMUP];
	DRM_RND_STATE(prng, random_seed);
	const unsigned int count = 8192;
	unsigned int size;
	unsigned long *bitmap;
	struct drm_mm mm;
	struct drm_mm_node *nodes, *node, *next;
	unsigned int *order, n, m, o = 0;
	int ret;

	/* Like igt_topdown, but instead of searching for the last hole,
	 * we search for the first.
	 */

	ret = -ENOMEM;
	nodes = vzalloc(array_size(count, sizeof(*nodes)));
	if (!nodes)
		goto err;

	bitmap = bitmap_zalloc(count, GFP_KERNEL);
	if (!bitmap)
		goto err_nodes;

	order = drm_random_order(count, &prng);
	if (!order)
		goto err_bitmap;

	ret = -EINVAL;
	for (size = 1; size <= 64; size <<= 1) {
		drm_mm_init(&mm, 0, size*count);
		for (n = 0; n < count; n++) {
			if (!expect_insert(&mm, &nodes[n],
					   size, 0, n,
					   bottomup)) {
				pr_err("bottomup insert failed, size %u step %d\n", size, n);
				goto out;
			}

			/* The single remaining hole must be at the top. */
			if (!assert_one_hole(&mm, size*(n + 1), size*count))
				goto out;
		}

		if (!assert_continuous(&mm, size))
			goto out;

		drm_random_reorder(order, count, &prng);
		for_each_prime_number_from(n, 1, min(count, max_prime)) {
			/* Punch n random holes, remembering which slots. */
			for (m = 0; m < n; m++) {
				node = &nodes[order[(o + m) % count]];
				drm_mm_remove_node(node);
				__set_bit(node_index(node), bitmap);
			}

			for (m = 0; m < n; m++) {
				unsigned int first;

				node = &nodes[order[(o + m) % count]];
				if (!expect_insert(&mm, node,
						   size, 0, 0,
						   bottomup)) {
					pr_err("insert failed, step %d/%d\n", m, n);
					goto out;
				}

				/* Bottom-up must fill the lowest free slot. */
				first = find_first_bit(bitmap, count);
				if (node_index(node) != first) {
					pr_err("node %d/%d not inserted into bottom hole, expected %d, found %d\n",
					       m, n, first, node_index(node));
					goto out;
				}
				__clear_bit(first, bitmap);
			}

			DRM_MM_BUG_ON(find_first_bit(bitmap, count) != count);

			o += n;
		}

		drm_mm_for_each_node_safe(node, next, &mm)
			drm_mm_remove_node(node);
		DRM_MM_BUG_ON(!drm_mm_clean(&mm));
		cond_resched();
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_bitmap:
	bitmap_free(bitmap);
err_nodes:
	vfree(nodes);
err:
	return ret;
}
1826 
__igt_once(unsigned int mode)1827 static int __igt_once(unsigned int mode)
1828 {
1829 	struct drm_mm mm;
1830 	struct drm_mm_node rsvd_lo, rsvd_hi, node;
1831 	int err;
1832 
1833 	drm_mm_init(&mm, 0, 7);
1834 
1835 	memset(&rsvd_lo, 0, sizeof(rsvd_lo));
1836 	rsvd_lo.start = 1;
1837 	rsvd_lo.size = 1;
1838 	err = drm_mm_reserve_node(&mm, &rsvd_lo);
1839 	if (err) {
1840 		pr_err("Could not reserve low node\n");
1841 		goto err;
1842 	}
1843 
1844 	memset(&rsvd_hi, 0, sizeof(rsvd_hi));
1845 	rsvd_hi.start = 5;
1846 	rsvd_hi.size = 1;
1847 	err = drm_mm_reserve_node(&mm, &rsvd_hi);
1848 	if (err) {
1849 		pr_err("Could not reserve low node\n");
1850 		goto err_lo;
1851 	}
1852 
1853 	if (!drm_mm_hole_follows(&rsvd_lo) || !drm_mm_hole_follows(&rsvd_hi)) {
1854 		pr_err("Expected a hole after lo and high nodes!\n");
1855 		err = -EINVAL;
1856 		goto err_hi;
1857 	}
1858 
1859 	memset(&node, 0, sizeof(node));
1860 	err = drm_mm_insert_node_generic(&mm, &node,
1861 					 2, 0, 0,
1862 					 mode | DRM_MM_INSERT_ONCE);
1863 	if (!err) {
1864 		pr_err("Unexpectedly inserted the node into the wrong hole: node.start=%llx\n",
1865 		       node.start);
1866 		err = -EINVAL;
1867 		goto err_node;
1868 	}
1869 
1870 	err = drm_mm_insert_node_generic(&mm, &node, 2, 0, 0, mode);
1871 	if (err) {
1872 		pr_err("Could not insert the node into the available hole!\n");
1873 		err = -EINVAL;
1874 		goto err_hi;
1875 	}
1876 
1877 err_node:
1878 	drm_mm_remove_node(&node);
1879 err_hi:
1880 	drm_mm_remove_node(&rsvd_hi);
1881 err_lo:
1882 	drm_mm_remove_node(&rsvd_lo);
1883 err:
1884 	drm_mm_takedown(&mm);
1885 	return err;
1886 }
1887 
igt_lowest(void * ignored)1888 static int igt_lowest(void *ignored)
1889 {
1890 	return __igt_once(DRM_MM_INSERT_LOW);
1891 }
1892 
igt_highest(void * ignored)1893 static int igt_highest(void *ignored)
1894 {
1895 	return __igt_once(DRM_MM_INSERT_HIGH);
1896 }
1897 
/*
 * Color-adjust callback: shrink the usable hole by one unit on each side
 * that abuts an allocated node of a different color, forcing a gap
 * between differently colored neighbours.
 */
static void separate_adjacent_colors(const struct drm_mm_node *node,
				     unsigned long color,
				     u64 *start,
				     u64 *end)
{
	const struct drm_mm_node *next;

	if (node->allocated && node->color != color)
		*start += 1;

	next = list_next_entry(node, node_list);
	if (next->allocated && next->color != color)
		*end -= 1;
}
1910 
colors_abutt(const struct drm_mm_node * node)1911 static bool colors_abutt(const struct drm_mm_node *node)
1912 {
1913 	if (!drm_mm_hole_follows(node) &&
1914 	    list_next_entry(node, node_list)->allocated) {
1915 		pr_err("colors abutt; %ld [%llx + %llx] is next to %ld [%llx + %llx]!\n",
1916 		       node->color, node->start, node->size,
1917 		       list_next_entry(node, node_list)->color,
1918 		       list_next_entry(node, node_list)->start,
1919 		       list_next_entry(node, node_list)->size);
1920 		return true;
1921 	}
1922 
1923 	return false;
1924 }
1925 
/*
 * igt_color - check color bookkeeping and the color_adjust callback.
 *
 * First verifies that a node's color survives insertion unchanged, then
 * installs separate_adjacent_colors and checks that reserve rejects
 * placements violating the color gap, that insertion leaves a gap
 * between differently colored neighbours, and that alignment is still
 * honoured for colored nodes.
 */
static int igt_color(void *ignored)
{
	const unsigned int count = min(4096u, max_iterations);
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct drm_mm_node *node, *nn;
	unsigned int n;
	int ret = -EINVAL, err;

	/* Color adjustment complicates everything. First we just check
	 * that when we insert a node we apply any color_adjustment callback.
	 * The callback we use should ensure that there is a gap between
	 * any two nodes, and so after each insertion we check that those
	 * holes are inserted and that they are preserved.
	 */

	drm_mm_init(&mm, 0, U64_MAX);

	for (n = 1; n <= count; n++) {
		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node) {
			ret = -ENOMEM;
			goto out;
		}

		/* Use size == color so the color can be validated later. */
		if (!expect_insert(&mm, node,
				   n, 0, n,
				   &insert_modes[0])) {
			pr_err("insert failed, step %d\n", n);
			kfree(node);
			goto out;
		}
	}

	drm_mm_for_each_node_safe(node, nn, &mm) {
		if (node->color != node->size) {
			pr_err("invalid color stored: expected %lld, found %ld\n",
			       node->size, node->color);

			goto out;
		}

		drm_mm_remove_node(node);
		kfree(node);
	}

	/* Now, let's start experimenting with applying a color callback */
	mm.color_adjust = separate_adjacent_colors;
	for (mode = insert_modes; mode->name; mode++) {
		u64 last;

		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node) {
			ret = -ENOMEM;
			goto out;
		}

		node->size = 1 + 2*count;
		node->color = node->size;

		err = drm_mm_reserve_node(&mm, node);
		if (err) {
			pr_err("initial reserve failed!\n");
			ret = err;
			goto out;
		}

		last = node->start + node->size;

		for (n = 1; n <= count; n++) {
			int rem;

			node = kzalloc(sizeof(*node), GFP_KERNEL);
			if (!node) {
				ret = -ENOMEM;
				goto out;
			}

			node->start = last;
			node->size = n + count;
			node->color = node->size;

			/* Reserving flush against the previous node must be
			 * rejected by the color gap.
			 */
			err = drm_mm_reserve_node(&mm, node);
			if (err != -ENOSPC) {
				pr_err("reserve %d did not report color overlap! err=%d\n",
				       n, err);
				goto out;
			}

			/* Step past the forbidden gap and realign, then the
			 * reserve must succeed.
			 */
			node->start += n + 1;
			rem = misalignment(node, n + count);
			node->start += n + count - rem;

			err = drm_mm_reserve_node(&mm, node);
			if (err) {
				pr_err("reserve %d failed, err=%d\n", n, err);
				ret = err;
				goto out;
			}

			last = node->start + node->size;
		}

		for (n = 1; n <= count; n++) {
			node = kzalloc(sizeof(*node), GFP_KERNEL);
			if (!node) {
				ret = -ENOMEM;
				goto out;
			}

			if (!expect_insert(&mm, node,
					   n, n, n,
					   mode)) {
				pr_err("%s insert failed, step %d\n",
				       mode->name, n);
				kfree(node);
				goto out;
			}
		}

		drm_mm_for_each_node_safe(node, nn, &mm) {
			u64 rem;

			if (node->color != node->size) {
				pr_err("%s invalid color stored: expected %lld, found %ld\n",
				       mode->name, node->size, node->color);

				goto out;
			}

			if (colors_abutt(node))
				goto out;

			div64_u64_rem(node->start, node->size, &rem);
			if (rem) {
				pr_err("%s colored node misaligned, start=%llx expected alignment=%lld [rem=%lld]\n",
				       mode->name, node->start, node->size, rem);
				goto out;
			}

			drm_mm_remove_node(node);
			kfree(node);
		}

		cond_resched();
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, nn, &mm) {
		drm_mm_remove_node(node);
		kfree(node);
	}
	drm_mm_takedown(&mm);
	return ret;
}
2082 
/*
 * evict_color - like evict_something(), but with color adjustment active.
 *
 * Scans and evicts (including any extra color-adjust victims collected by
 * evict_nodes(use_color=true)), inserts a temporary colored node into the
 * freed hole, checks it sits within [range_start, range_end), does not
 * abut a differently colored neighbour, and matches the requested
 * size/alignment/color, then restores all evicted nodes. Returns 0 or a
 * negative error code.
 */
static int evict_color(struct drm_mm *mm,
		       u64 range_start, u64 range_end,
		       struct evict_node *nodes,
		       unsigned int *order,
		       unsigned int count,
		       unsigned int size,
		       unsigned int alignment,
		       unsigned long color,
		       const struct insert_mode *mode)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	struct drm_mm_node tmp;
	int err;

	drm_mm_scan_init_with_range(&scan, mm,
				    size, alignment, color,
				    range_start, range_end,
				    mode->mode);
	if (!evict_nodes(&scan,
			 nodes, order, count, true,
			 &evict_list))
		return -EINVAL;

	memset(&tmp, 0, sizeof(tmp));
	err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, color,
					 DRM_MM_INSERT_EVICT);
	if (err) {
		pr_err("Failed to insert into eviction hole: size=%d, align=%d, color=%lu, err=%d\n",
		       size, alignment, color, err);
		show_scan(&scan);
		show_holes(mm, 3);
		return err;
	}

	if (tmp.start < range_start || tmp.start + tmp.size > range_end) {
		pr_err("Inserted [address=%llu + %llu] did not fit into the request range [%llu, %llu]\n",
		       tmp.start, tmp.size, range_start, range_end);
		err = -EINVAL;
	}

	/* The color_adjust callback must have kept a gap on both sides. */
	if (colors_abutt(&tmp))
		err = -EINVAL;

	if (!assert_node(&tmp, mm, size, alignment, color)) {
		pr_err("Inserted did not fit the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx\n",
		       tmp.size, size,
		       alignment, misalignment(&tmp, alignment), tmp.start);
		err = -EINVAL;
	}

	drm_mm_remove_node(&tmp);
	if (err)
		return err;

	/* Put the evicted nodes back for the next pass. */
	list_for_each_entry(e, &evict_list, link) {
		err = drm_mm_reserve_node(mm, &e->node);
		if (err) {
			pr_err("Failed to reinsert node after eviction: start=%llx\n",
			       e->node.start);
			return err;
		}
	}

	cond_resched();
	return 0;
}
2151 
/*
 * igt_color_evict - eviction scanning must honour the color_adjust
 * callback when choosing and sizing its set of victims.
 */
static int igt_color_evict(void *ignored)
{
	DRM_RND_STATE(prng, random_seed);
	const unsigned int total_size = min(8192u, max_iterations);
	const struct insert_mode *mode;
	unsigned long color = 0;
	struct drm_mm mm;
	struct evict_node *nodes;
	struct drm_mm_node *node, *next;
	unsigned int *order, n;
	int ret, err;

	/* Check that the drm_mm_scan also honours color adjustment when
	 * choosing its victims to create a hole. Our color_adjust does not
	 * allow two nodes to be placed together without an intervening hole
	 * enlarging the set of victims that must be evicted.
	 */

	ret = -ENOMEM;
	nodes = vzalloc(array_size(total_size, sizeof(*nodes)));
	if (!nodes)
		goto err;

	order = drm_random_order(total_size, &prng);
	if (!order)
		goto err_nodes;

	ret = -EINVAL;
	/* Twice the space less one: every unit node needs a gap after it. */
	drm_mm_init(&mm, 0, 2*total_size - 1);
	mm.color_adjust = separate_adjacent_colors;
	for (n = 0; n < total_size; n++) {
		/* Each node gets a fresh color so neighbours always differ. */
		if (!expect_insert(&mm, &nodes[n].node,
				   1, 0, color++,
				   &insert_modes[0])) {
			pr_err("insert failed, step %d\n", n);
			goto out;
		}
	}

	for (mode = evict_modes; mode->name; mode++) {
		for (n = 1; n <= total_size; n <<= 1) {
			drm_random_reorder(order, total_size, &prng);
			err = evict_color(&mm, 0, U64_MAX,
					  nodes, order, total_size,
					  n, 1, color++,
					  mode);
			if (err) {
				pr_err("%s evict_color(size=%u) failed\n",
				       mode->name, n);
				goto out;
			}
		}

		for (n = 1; n < total_size; n <<= 1) {
			drm_random_reorder(order, total_size, &prng);
			err = evict_color(&mm, 0, U64_MAX,
					  nodes, order, total_size,
					  total_size/2, n, color++,
					  mode);
			if (err) {
				pr_err("%s evict_color(size=%u, alignment=%u) failed\n",
				       mode->name, total_size/2, n);
				goto out;
			}
		}

		for_each_prime_number_from(n, 1, min(total_size, max_prime)) {
			unsigned int nsize = (total_size - n + 1) / 2;

			DRM_MM_BUG_ON(!nsize);

			drm_random_reorder(order, total_size, &prng);
			err = evict_color(&mm, 0, U64_MAX,
					  nodes, order, total_size,
					  nsize, n, color++,
					  mode);
			if (err) {
				pr_err("%s evict_color(size=%u, alignment=%u) failed\n",
				       mode->name, nsize, n);
				goto out;
			}
		}

		cond_resched();
	}

	ret = 0;
out:
	if (ret)
		show_mm(&mm);
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_nodes:
	vfree(nodes);
err:
	return ret;
}
2251 
igt_color_evict_range(void * ignored)2252 static int igt_color_evict_range(void *ignored)
2253 {
2254 	DRM_RND_STATE(prng, random_seed);
2255 	const unsigned int total_size = 8192;
2256 	const unsigned int range_size = total_size / 2;
2257 	const unsigned int range_start = total_size / 4;
2258 	const unsigned int range_end = range_start + range_size;
2259 	const struct insert_mode *mode;
2260 	unsigned long color = 0;
2261 	struct drm_mm mm;
2262 	struct evict_node *nodes;
2263 	struct drm_mm_node *node, *next;
2264 	unsigned int *order, n;
2265 	int ret, err;
2266 
2267 	/* Like igt_color_evict(), but limited to small portion of the full
2268 	 * drm_mm range.
2269 	 */
2270 
2271 	ret = -ENOMEM;
2272 	nodes = vzalloc(array_size(total_size, sizeof(*nodes)));
2273 	if (!nodes)
2274 		goto err;
2275 
2276 	order = drm_random_order(total_size, &prng);
2277 	if (!order)
2278 		goto err_nodes;
2279 
2280 	ret = -EINVAL;
2281 	drm_mm_init(&mm, 0, 2*total_size - 1);
2282 	mm.color_adjust = separate_adjacent_colors;
2283 	for (n = 0; n < total_size; n++) {
2284 		if (!expect_insert(&mm, &nodes[n].node,
2285 				   1, 0, color++,
2286 				   &insert_modes[0])) {
2287 			pr_err("insert failed, step %d\n", n);
2288 			goto out;
2289 		}
2290 	}
2291 
2292 	for (mode = evict_modes; mode->name; mode++) {
2293 		for (n = 1; n <= range_size; n <<= 1) {
2294 			drm_random_reorder(order, range_size, &prng);
2295 			err = evict_color(&mm, range_start, range_end,
2296 					  nodes, order, total_size,
2297 					  n, 1, color++,
2298 					  mode);
2299 			if (err) {
2300 				pr_err("%s evict_color(size=%u) failed for range [%x, %x]\n",
2301 				       mode->name, n, range_start, range_end);
2302 				goto out;
2303 			}
2304 		}
2305 
2306 		for (n = 1; n < range_size; n <<= 1) {
2307 			drm_random_reorder(order, total_size, &prng);
2308 			err = evict_color(&mm, range_start, range_end,
2309 					  nodes, order, total_size,
2310 					  range_size/2, n, color++,
2311 					  mode);
2312 			if (err) {
2313 				pr_err("%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n",
2314 				       mode->name, total_size/2, n, range_start, range_end);
2315 				goto out;
2316 			}
2317 		}
2318 
2319 		for_each_prime_number_from(n, 1, min(range_size, max_prime)) {
2320 			unsigned int nsize = (range_size - n + 1) / 2;
2321 
2322 			DRM_MM_BUG_ON(!nsize);
2323 
2324 			drm_random_reorder(order, total_size, &prng);
2325 			err = evict_color(&mm, range_start, range_end,
2326 					  nodes, order, total_size,
2327 					  nsize, n, color++,
2328 					  mode);
2329 			if (err) {
2330 				pr_err("%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n",
2331 				       mode->name, nsize, n, range_start, range_end);
2332 				goto out;
2333 			}
2334 		}
2335 
2336 		cond_resched();
2337 	}
2338 
2339 	ret = 0;
2340 out:
2341 	if (ret)
2342 		show_mm(&mm);
2343 	drm_mm_for_each_node_safe(node, next, &mm)
2344 		drm_mm_remove_node(node);
2345 	drm_mm_takedown(&mm);
2346 	kfree(order);
2347 err_nodes:
2348 	vfree(nodes);
2349 err:
2350 	return ret;
2351 }
2352 
2353 #include "drm_selftest.c"
2354 
test_drm_mm_init(void)2355 static int __init test_drm_mm_init(void)
2356 {
2357 	int err;
2358 
2359 	while (!random_seed)
2360 		random_seed = get_random_int();
2361 
2362 	pr_info("Testing DRM range manger (struct drm_mm), with random_seed=0x%x max_iterations=%u max_prime=%u\n",
2363 		random_seed, max_iterations, max_prime);
2364 	err = run_selftests(selftests, ARRAY_SIZE(selftests), NULL);
2365 
2366 	return err > 0 ? 0 : err;
2367 }
2368 
static void __exit test_drm_mm_exit(void)
{
	/* Intentionally empty: all tests run at init time and clean up after
	 * themselves; an exit handler is still required so the module can be
	 * unloaded.
	 */
}
2372 
module_init(test_drm_mm_init);
module_exit(test_drm_mm_exit);

/* 0400: parameters are settable only at load time and readable by root. */
module_param(random_seed, uint, 0400);
module_param(max_iterations, uint, 0400);
module_param(max_prime, uint, 0400);

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");
2382