• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Test cases for the drm_mm range manager
3  */
4 
5 #define pr_fmt(fmt) "drm_mm: " fmt
6 
7 #include <linux/module.h>
8 #include <linux/prime_numbers.h>
9 #include <linux/slab.h>
10 #include <linux/random.h>
11 #include <linux/vmalloc.h>
12 
13 #include <drm/drm_mm.h>
14 
15 #include "../lib/drm_random.h"
16 
17 #define TESTS "drm_mm_selftests.h"
18 #include "drm_selftest.h"
19 
20 static unsigned int random_seed;
21 static unsigned int max_iterations = 8192;
22 static unsigned int max_prime = 128;
23 
24 enum {
25 	BEST,
26 	BOTTOMUP,
27 	TOPDOWN,
28 	EVICT,
29 };
30 
31 static const struct insert_mode {
32 	const char *name;
33 	enum drm_mm_insert_mode mode;
34 } insert_modes[] = {
35 	[BEST] = { "best", DRM_MM_INSERT_BEST },
36 	[BOTTOMUP] = { "bottom-up", DRM_MM_INSERT_LOW },
37 	[TOPDOWN] = { "top-down", DRM_MM_INSERT_HIGH },
38 	[EVICT] = { "evict", DRM_MM_INSERT_EVICT },
39 	{}
40 }, evict_modes[] = {
41 	{ "bottom-up", DRM_MM_INSERT_LOW },
42 	{ "top-down", DRM_MM_INSERT_HIGH },
43 	{}
44 };
45 
/* Smoke test: proves the selftest harness itself runs and can report. */
static int igt_sanitycheck(void *ignored)
{
	pr_info("%s - ok!\n", __func__);
	return 0;
}
51 
assert_no_holes(const struct drm_mm * mm)52 static bool assert_no_holes(const struct drm_mm *mm)
53 {
54 	struct drm_mm_node *hole;
55 	u64 hole_start, hole_end;
56 	unsigned long count;
57 
58 	count = 0;
59 	drm_mm_for_each_hole(hole, mm, hole_start, hole_end)
60 		count++;
61 	if (count) {
62 		pr_err("Expected to find no holes (after reserve), found %lu instead\n", count);
63 		return false;
64 	}
65 
66 	drm_mm_for_each_node(hole, mm) {
67 		if (drm_mm_hole_follows(hole)) {
68 			pr_err("Hole follows node, expected none!\n");
69 			return false;
70 		}
71 	}
72 
73 	return true;
74 }
75 
assert_one_hole(const struct drm_mm * mm,u64 start,u64 end)76 static bool assert_one_hole(const struct drm_mm *mm, u64 start, u64 end)
77 {
78 	struct drm_mm_node *hole;
79 	u64 hole_start, hole_end;
80 	unsigned long count;
81 	bool ok = true;
82 
83 	if (end <= start)
84 		return true;
85 
86 	count = 0;
87 	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
88 		if (start != hole_start || end != hole_end) {
89 			if (ok)
90 				pr_err("empty mm has incorrect hole, found (%llx, %llx), expect (%llx, %llx)\n",
91 				       hole_start, hole_end,
92 				       start, end);
93 			ok = false;
94 		}
95 		count++;
96 	}
97 	if (count != 1) {
98 		pr_err("Expected to find one hole, found %lu instead\n", count);
99 		ok = false;
100 	}
101 
102 	return ok;
103 }
104 
assert_continuous(const struct drm_mm * mm,u64 size)105 static bool assert_continuous(const struct drm_mm *mm, u64 size)
106 {
107 	struct drm_mm_node *node, *check, *found;
108 	unsigned long n;
109 	u64 addr;
110 
111 	if (!assert_no_holes(mm))
112 		return false;
113 
114 	n = 0;
115 	addr = 0;
116 	drm_mm_for_each_node(node, mm) {
117 		if (node->start != addr) {
118 			pr_err("node[%ld] list out of order, expected %llx found %llx\n",
119 			       n, addr, node->start);
120 			return false;
121 		}
122 
123 		if (node->size != size) {
124 			pr_err("node[%ld].size incorrect, expected %llx, found %llx\n",
125 			       n, size, node->size);
126 			return false;
127 		}
128 
129 		if (drm_mm_hole_follows(node)) {
130 			pr_err("node[%ld] is followed by a hole!\n", n);
131 			return false;
132 		}
133 
134 		found = NULL;
135 		drm_mm_for_each_node_in_range(check, mm, addr, addr + size) {
136 			if (node != check) {
137 				pr_err("lookup return wrong node, expected start %llx, found %llx\n",
138 				       node->start, check->start);
139 				return false;
140 			}
141 			found = check;
142 		}
143 		if (!found) {
144 			pr_err("lookup failed for node %llx + %llx\n",
145 			       addr, size);
146 			return false;
147 		}
148 
149 		addr += size;
150 		n++;
151 	}
152 
153 	return true;
154 }
155 
/* Return node->start modulo @alignment (0 when no alignment is requested). */
static u64 misalignment(struct drm_mm_node *node, u64 alignment)
{
	u64 rem = 0;

	if (alignment)
		div64_u64_rem(node->start, alignment, &rem);

	return rem;
}
166 
assert_node(struct drm_mm_node * node,struct drm_mm * mm,u64 size,u64 alignment,unsigned long color)167 static bool assert_node(struct drm_mm_node *node, struct drm_mm *mm,
168 			u64 size, u64 alignment, unsigned long color)
169 {
170 	bool ok = true;
171 
172 	if (!drm_mm_node_allocated(node) || node->mm != mm) {
173 		pr_err("node not allocated\n");
174 		ok = false;
175 	}
176 
177 	if (node->size != size) {
178 		pr_err("node has wrong size, found %llu, expected %llu\n",
179 		       node->size, size);
180 		ok = false;
181 	}
182 
183 	if (misalignment(node, alignment)) {
184 		pr_err("node is misaligned, start %llx rem %llu, expected alignment %llu\n",
185 		       node->start, misalignment(node, alignment), alignment);
186 		ok = false;
187 	}
188 
189 	if (node->color != color) {
190 		pr_err("node has wrong color, found %lu, expected %lu\n",
191 		       node->color, color);
192 		ok = false;
193 	}
194 
195 	return ok;
196 }
197 
/* Dump the full state of @mm through the debug printer, tagged with the
 * calling function's name.
 */
#define show_mm(mm) do { \
	struct drm_printer __p = drm_debug_printer(__func__); \
	drm_mm_print((mm), &__p); } while (0)
201 
igt_init(void * ignored)202 static int igt_init(void *ignored)
203 {
204 	const unsigned int size = 4096;
205 	struct drm_mm mm;
206 	struct drm_mm_node tmp;
207 	int ret = -EINVAL;
208 
209 	/* Start with some simple checks on initialising the struct drm_mm */
210 	memset(&mm, 0, sizeof(mm));
211 	if (drm_mm_initialized(&mm)) {
212 		pr_err("zeroed mm claims to be initialized\n");
213 		return ret;
214 	}
215 
216 	memset(&mm, 0xff, sizeof(mm));
217 	drm_mm_init(&mm, 0, size);
218 	if (!drm_mm_initialized(&mm)) {
219 		pr_err("mm claims not to be initialized\n");
220 		goto out;
221 	}
222 
223 	if (!drm_mm_clean(&mm)) {
224 		pr_err("mm not empty on creation\n");
225 		goto out;
226 	}
227 
228 	/* After creation, it should all be one massive hole */
229 	if (!assert_one_hole(&mm, 0, size)) {
230 		ret = -EINVAL;
231 		goto out;
232 	}
233 
234 	memset(&tmp, 0, sizeof(tmp));
235 	tmp.start = 0;
236 	tmp.size = size;
237 	ret = drm_mm_reserve_node(&mm, &tmp);
238 	if (ret) {
239 		pr_err("failed to reserve whole drm_mm\n");
240 		goto out;
241 	}
242 
243 	/* After filling the range entirely, there should be no holes */
244 	if (!assert_no_holes(&mm)) {
245 		ret = -EINVAL;
246 		goto out;
247 	}
248 
249 	/* And then after emptying it again, the massive hole should be back */
250 	drm_mm_remove_node(&tmp);
251 	if (!assert_one_hole(&mm, 0, size)) {
252 		ret = -EINVAL;
253 		goto out;
254 	}
255 
256 out:
257 	if (ret)
258 		show_mm(&mm);
259 	drm_mm_takedown(&mm);
260 	return ret;
261 }
262 
igt_debug(void * ignored)263 static int igt_debug(void *ignored)
264 {
265 	struct drm_mm mm;
266 	struct drm_mm_node nodes[2];
267 	int ret;
268 
269 	/* Create a small drm_mm with a couple of nodes and a few holes, and
270 	 * check that the debug iterator doesn't explode over a trivial drm_mm.
271 	 */
272 
273 	drm_mm_init(&mm, 0, 4096);
274 
275 	memset(nodes, 0, sizeof(nodes));
276 	nodes[0].start = 512;
277 	nodes[0].size = 1024;
278 	ret = drm_mm_reserve_node(&mm, &nodes[0]);
279 	if (ret) {
280 		pr_err("failed to reserve node[0] {start=%lld, size=%lld)\n",
281 		       nodes[0].start, nodes[0].size);
282 		return ret;
283 	}
284 
285 	nodes[1].size = 1024;
286 	nodes[1].start = 4096 - 512 - nodes[1].size;
287 	ret = drm_mm_reserve_node(&mm, &nodes[1]);
288 	if (ret) {
289 		pr_err("failed to reserve node[1] {start=%lld, size=%lld)\n",
290 		       nodes[1].start, nodes[1].size);
291 		return ret;
292 	}
293 
294 	show_mm(&mm);
295 	return 0;
296 }
297 
/* Convenience helper: point a scratch node at [start, start + size) and
 * hand it back so it can be used inline in a call expression.
 */
static struct drm_mm_node *set_node(struct drm_mm_node *node,
				    u64 start, u64 size)
{
	node->size = size;
	node->start = start;
	return node;
}
305 
expect_reserve_fail(struct drm_mm * mm,struct drm_mm_node * node)306 static bool expect_reserve_fail(struct drm_mm *mm, struct drm_mm_node *node)
307 {
308 	int err;
309 
310 	err = drm_mm_reserve_node(mm, node);
311 	if (likely(err == -ENOSPC))
312 		return true;
313 
314 	if (!err) {
315 		pr_err("impossible reserve succeeded, node %llu + %llu\n",
316 		       node->start, node->size);
317 		drm_mm_remove_node(node);
318 	} else {
319 		pr_err("impossible reserve failed with wrong error %d [expected %d], node %llu + %llu\n",
320 		       err, -ENOSPC, node->start, node->size);
321 	}
322 	return false;
323 }
324 
check_reserve_boundaries(struct drm_mm * mm,unsigned int count,u64 size)325 static bool check_reserve_boundaries(struct drm_mm *mm,
326 				     unsigned int count,
327 				     u64 size)
328 {
329 	const struct boundary {
330 		u64 start, size;
331 		const char *name;
332 	} boundaries[] = {
333 #define B(st, sz) { (st), (sz), "{ " #st ", " #sz "}" }
334 		B(0, 0),
335 		B(-size, 0),
336 		B(size, 0),
337 		B(size * count, 0),
338 		B(-size, size),
339 		B(-size, -size),
340 		B(-size, 2*size),
341 		B(0, -size),
342 		B(size, -size),
343 		B(count*size, size),
344 		B(count*size, -size),
345 		B(count*size, count*size),
346 		B(count*size, -count*size),
347 		B(count*size, -(count+1)*size),
348 		B((count+1)*size, size),
349 		B((count+1)*size, -size),
350 		B((count+1)*size, -2*size),
351 #undef B
352 	};
353 	struct drm_mm_node tmp = {};
354 	int n;
355 
356 	for (n = 0; n < ARRAY_SIZE(boundaries); n++) {
357 		if (!expect_reserve_fail(mm,
358 					 set_node(&tmp,
359 						  boundaries[n].start,
360 						  boundaries[n].size))) {
361 			pr_err("boundary[%d:%s] failed, count=%u, size=%lld\n",
362 			       n, boundaries[n].name, count, size);
363 			return false;
364 		}
365 	}
366 
367 	return true;
368 }
369 
/* For exercising drm_mm_reserve_node(), we want to check that
 * reservations outside of the drm_mm range are rejected, and to
 * overlapping and otherwise already occupied ranges. Afterwards,
 * the tree and nodes should be intact.
 *
 * Fix: the error message in the remove/reinsert loop reported
 * nodes[n].start although the node being reinserted is
 * nodes[order[n]] — the diagnostic named the wrong node.
 */
static int __igt_reserve(unsigned int count, u64 size)
{
	DRM_RND_STATE(prng, random_seed);
	struct drm_mm mm;
	struct drm_mm_node tmp, *nodes, *node, *next;
	unsigned int *order, n, m, o = 0;
	int ret, err;

	DRM_MM_BUG_ON(!count);
	DRM_MM_BUG_ON(!size);

	ret = -ENOMEM;
	order = drm_random_order(count, &prng);
	if (!order)
		goto err;

	nodes = vzalloc(sizeof(*nodes) * count);
	if (!nodes)
		goto err_order;

	ret = -EINVAL;
	drm_mm_init(&mm, 0, count * size);

	if (!check_reserve_boundaries(&mm, count, size))
		goto out;

	/* Reserve every slot in random order; each must land exactly once. */
	for (n = 0; n < count; n++) {
		nodes[n].start = order[n] * size;
		nodes[n].size = size;

		err = drm_mm_reserve_node(&mm, &nodes[n]);
		if (err) {
			pr_err("reserve failed, step %d, start %llu\n",
			       n, nodes[n].start);
			ret = err;
			goto out;
		}

		if (!drm_mm_node_allocated(&nodes[n])) {
			pr_err("reserved node not allocated! step %d, start %llu\n",
			       n, nodes[n].start);
			goto out;
		}

		/* Reserving the same node twice must be rejected. */
		if (!expect_reserve_fail(&mm, &nodes[n]))
			goto out;
	}

	/* After random insertion the nodes should be in order */
	if (!assert_continuous(&mm, size))
		goto out;

	/* Repeated use should then fail */
	drm_random_reorder(order, count, &prng);
	for (n = 0; n < count; n++) {
		if (!expect_reserve_fail(&mm,
					 set_node(&tmp, order[n] * size, 1)))
			goto out;

		/* Remove and reinsert should work */
		drm_mm_remove_node(&nodes[order[n]]);
		err = drm_mm_reserve_node(&mm, &nodes[order[n]]);
		if (err) {
			pr_err("reserve failed, step %d, start %llu\n",
			       n, nodes[order[n]].start);
			ret = err;
			goto out;
		}
	}

	if (!assert_continuous(&mm, size))
		goto out;

	/* Overlapping use should then fail */
	for (n = 0; n < count; n++) {
		if (!expect_reserve_fail(&mm, set_node(&tmp, 0, size*count)))
			goto out;
	}
	for (n = 0; n < count; n++) {
		if (!expect_reserve_fail(&mm,
					 set_node(&tmp,
						  size * n,
						  size * (count - n))))
			goto out;
	}

	/* Remove several, reinsert, check full */
	for_each_prime_number(n, min(max_prime, count)) {
		for (m = 0; m < n; m++) {
			node = &nodes[order[(o + m) % count]];
			drm_mm_remove_node(node);
		}

		for (m = 0; m < n; m++) {
			node = &nodes[order[(o + m) % count]];
			err = drm_mm_reserve_node(&mm, node);
			if (err) {
				pr_err("reserve failed, step %d/%d, start %llu\n",
				       m, n, node->start);
				ret = err;
				goto out;
			}
		}

		o += n;

		if (!assert_continuous(&mm, size))
			goto out;
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	vfree(nodes);
err_order:
	kfree(order);
err:
	return ret;
}
497 
igt_reserve(void * ignored)498 static int igt_reserve(void *ignored)
499 {
500 	const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
501 	int n, ret;
502 
503 	for_each_prime_number_from(n, 1, 54) {
504 		u64 size = BIT_ULL(n);
505 
506 		ret = __igt_reserve(count, size - 1);
507 		if (ret)
508 			return ret;
509 
510 		ret = __igt_reserve(count, size);
511 		if (ret)
512 			return ret;
513 
514 		ret = __igt_reserve(count, size + 1);
515 		if (ret)
516 			return ret;
517 
518 		cond_resched();
519 	}
520 
521 	return 0;
522 }
523 
expect_insert(struct drm_mm * mm,struct drm_mm_node * node,u64 size,u64 alignment,unsigned long color,const struct insert_mode * mode)524 static bool expect_insert(struct drm_mm *mm, struct drm_mm_node *node,
525 			  u64 size, u64 alignment, unsigned long color,
526 			  const struct insert_mode *mode)
527 {
528 	int err;
529 
530 	err = drm_mm_insert_node_generic(mm, node,
531 					 size, alignment, color,
532 					 mode->mode);
533 	if (err) {
534 		pr_err("insert (size=%llu, alignment=%llu, color=%lu, mode=%s) failed with err=%d\n",
535 		       size, alignment, color, mode->name, err);
536 		return false;
537 	}
538 
539 	if (!assert_node(node, mm, size, alignment, color)) {
540 		drm_mm_remove_node(node);
541 		return false;
542 	}
543 
544 	return true;
545 }
546 
expect_insert_fail(struct drm_mm * mm,u64 size)547 static bool expect_insert_fail(struct drm_mm *mm, u64 size)
548 {
549 	struct drm_mm_node tmp = {};
550 	int err;
551 
552 	err = drm_mm_insert_node(mm, &tmp, size);
553 	if (likely(err == -ENOSPC))
554 		return true;
555 
556 	if (!err) {
557 		pr_err("impossible insert succeeded, node %llu + %llu\n",
558 		       tmp.start, tmp.size);
559 		drm_mm_remove_node(&tmp);
560 	} else {
561 		pr_err("impossible insert failed with wrong error %d [expected %d], size %llu\n",
562 		       err, -ENOSPC, size);
563 	}
564 	return false;
565 }
566 
/* Fill a range with lots of nodes, check it doesn't fail too early.
 * With @replace set, each insertion goes through a temporary node that is
 * then swapped into place with drm_mm_replace_node(), which must carry all
 * parameters across to the target.
 */
static int __igt_insert(unsigned int count, u64 size, bool replace)
{
	DRM_RND_STATE(prng, random_seed);
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct drm_mm_node *nodes, *node, *next;
	unsigned int *order, n, m, o = 0;
	int ret;

	DRM_MM_BUG_ON(!count);
	DRM_MM_BUG_ON(!size);

	ret = -ENOMEM;
	nodes = vmalloc(count * sizeof(*nodes));
	if (!nodes)
		goto err;

	order = drm_random_order(count, &prng);
	if (!order)
		goto err_nodes;

	ret = -EINVAL;
	drm_mm_init(&mm, 0, count * size);

	for (mode = insert_modes; mode->name; mode++) {
		/* Fill the mm completely, one node per slot. */
		for (n = 0; n < count; n++) {
			struct drm_mm_node tmp;

			node = replace ? &tmp : &nodes[n];
			memset(node, 0, sizeof(*node));
			if (!expect_insert(&mm, node, size, 0, n, mode)) {
				pr_err("%s insert failed, size %llu step %d\n",
				       mode->name, size, n);
				goto out;
			}

			if (replace) {
				drm_mm_replace_node(&tmp, &nodes[n]);
				if (drm_mm_node_allocated(&tmp)) {
					pr_err("replaced old-node still allocated! step %d\n",
					       n);
					goto out;
				}

				if (!assert_node(&nodes[n], &mm, size, 0, n)) {
					pr_err("replaced node did not inherit parameters, size %llu step %d\n",
					       size, n);
					goto out;
				}

				if (tmp.start != nodes[n].start) {
					pr_err("replaced node mismatch location expected [%llx + %llx], found [%llx + %llx]\n",
					       tmp.start, size,
					       nodes[n].start, nodes[n].size);
					goto out;
				}
			}
		}

		/* After random insertion the nodes should be in order */
		if (!assert_continuous(&mm, size))
			goto out;

		/* Repeated use should then fail */
		if (!expect_insert_fail(&mm, size))
			goto out;

		/* Remove one and reinsert, as the only hole it should refill itself */
		for (n = 0; n < count; n++) {
			u64 addr = nodes[n].start;

			drm_mm_remove_node(&nodes[n]);
			if (!expect_insert(&mm, &nodes[n], size, 0, n, mode)) {
				pr_err("%s reinsert failed, size %llu step %d\n",
				       mode->name, size, n);
				goto out;
			}

			if (nodes[n].start != addr) {
				pr_err("%s reinsert node moved, step %d, expected %llx, found %llx\n",
				       mode->name, n, addr, nodes[n].start);
				goto out;
			}

			if (!assert_continuous(&mm, size))
				goto out;
		}

		/* Remove several, reinsert, check full */
		for_each_prime_number(n, min(max_prime, count)) {
			for (m = 0; m < n; m++) {
				node = &nodes[order[(o + m) % count]];
				drm_mm_remove_node(node);
			}

			for (m = 0; m < n; m++) {
				node = &nodes[order[(o + m) % count]];
				if (!expect_insert(&mm, node, size, 0, n, mode)) {
					pr_err("%s multiple reinsert failed, size %llu step %d\n",
					       mode->name, size, n);
					goto out;
				}
			}

			o += n;

			if (!assert_continuous(&mm, size))
				goto out;

			if (!expect_insert_fail(&mm, size))
				goto out;
		}

		/* Start the next mode from an empty mm. */
		drm_mm_for_each_node_safe(node, next, &mm)
			drm_mm_remove_node(node);
		DRM_MM_BUG_ON(!drm_mm_clean(&mm));
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_nodes:
	vfree(nodes);
err:
	return ret;
}
698 
igt_insert(void * ignored)699 static int igt_insert(void *ignored)
700 {
701 	const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
702 	unsigned int n;
703 	int ret;
704 
705 	for_each_prime_number_from(n, 1, 54) {
706 		u64 size = BIT_ULL(n);
707 
708 		ret = __igt_insert(count, size - 1, false);
709 		if (ret)
710 			return ret;
711 
712 		ret = __igt_insert(count, size, false);
713 		if (ret)
714 			return ret;
715 
716 		ret = __igt_insert(count, size + 1, false);
717 		if (ret)
718 			return ret;
719 
720 		cond_resched();
721 	}
722 
723 	return 0;
724 }
725 
igt_replace(void * ignored)726 static int igt_replace(void *ignored)
727 {
728 	const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
729 	unsigned int n;
730 	int ret;
731 
732 	/* Reuse igt_insert to exercise replacement by inserting a dummy node,
733 	 * then replacing it with the intended node. We want to check that
734 	 * the tree is intact and all the information we need is carried
735 	 * across to the target node.
736 	 */
737 
738 	for_each_prime_number_from(n, 1, 54) {
739 		u64 size = BIT_ULL(n);
740 
741 		ret = __igt_insert(count, size - 1, true);
742 		if (ret)
743 			return ret;
744 
745 		ret = __igt_insert(count, size, true);
746 		if (ret)
747 			return ret;
748 
749 		ret = __igt_insert(count, size + 1, true);
750 		if (ret)
751 			return ret;
752 
753 		cond_resched();
754 	}
755 
756 	return 0;
757 }
758 
expect_insert_in_range(struct drm_mm * mm,struct drm_mm_node * node,u64 size,u64 alignment,unsigned long color,u64 range_start,u64 range_end,const struct insert_mode * mode)759 static bool expect_insert_in_range(struct drm_mm *mm, struct drm_mm_node *node,
760 				   u64 size, u64 alignment, unsigned long color,
761 				   u64 range_start, u64 range_end,
762 				   const struct insert_mode *mode)
763 {
764 	int err;
765 
766 	err = drm_mm_insert_node_in_range(mm, node,
767 					  size, alignment, color,
768 					  range_start, range_end,
769 					  mode->mode);
770 	if (err) {
771 		pr_err("insert (size=%llu, alignment=%llu, color=%lu, mode=%s) nto range [%llx, %llx] failed with err=%d\n",
772 		       size, alignment, color, mode->name,
773 		       range_start, range_end, err);
774 		return false;
775 	}
776 
777 	if (!assert_node(node, mm, size, alignment, color)) {
778 		drm_mm_remove_node(node);
779 		return false;
780 	}
781 
782 	return true;
783 }
784 
expect_insert_in_range_fail(struct drm_mm * mm,u64 size,u64 range_start,u64 range_end)785 static bool expect_insert_in_range_fail(struct drm_mm *mm,
786 					u64 size,
787 					u64 range_start,
788 					u64 range_end)
789 {
790 	struct drm_mm_node tmp = {};
791 	int err;
792 
793 	err = drm_mm_insert_node_in_range(mm, &tmp,
794 					  size, 0, 0,
795 					  range_start, range_end,
796 					  0);
797 	if (likely(err == -ENOSPC))
798 		return true;
799 
800 	if (!err) {
801 		pr_err("impossible insert succeeded, node %llx + %llu, range [%llx, %llx]\n",
802 		       tmp.start, tmp.size, range_start, range_end);
803 		drm_mm_remove_node(&tmp);
804 	} else {
805 		pr_err("impossible insert failed with wrong error %d [expected %d], size %llu, range [%llx, %llx]\n",
806 		       err, -ENOSPC, size, range_start, range_end);
807 	}
808 
809 	return false;
810 }
811 
assert_contiguous_in_range(struct drm_mm * mm,u64 size,u64 start,u64 end)812 static bool assert_contiguous_in_range(struct drm_mm *mm,
813 				       u64 size,
814 				       u64 start,
815 				       u64 end)
816 {
817 	struct drm_mm_node *node;
818 	unsigned int n;
819 
820 	if (!expect_insert_in_range_fail(mm, size, start, end))
821 		return false;
822 
823 	n = div64_u64(start + size - 1, size);
824 	drm_mm_for_each_node(node, mm) {
825 		if (node->start < start || node->start + node->size > end) {
826 			pr_err("node %d out of range, address [%llx + %llu], range [%llx, %llx]\n",
827 			       n, node->start, node->start + node->size, start, end);
828 			return false;
829 		}
830 
831 		if (node->start != n * size) {
832 			pr_err("node %d out of order, expected start %llx, found %llx\n",
833 			       n, n * size, node->start);
834 			return false;
835 		}
836 
837 		if (node->size != size) {
838 			pr_err("node %d has wrong size, expected size %llx, found %llx\n",
839 			       n, size, node->size);
840 			return false;
841 		}
842 
843 		if (drm_mm_hole_follows(node) &&
844 		    drm_mm_hole_node_end(node) < end) {
845 			pr_err("node %d is followed by a hole!\n", n);
846 			return false;
847 		}
848 
849 		n++;
850 	}
851 
852 	if (start > 0) {
853 		node = __drm_mm_interval_first(mm, 0, start - 1);
854 		if (node->allocated) {
855 			pr_err("node before start: node=%llx+%llu, start=%llx\n",
856 			       node->start, node->size, start);
857 			return false;
858 		}
859 	}
860 
861 	if (end < U64_MAX) {
862 		node = __drm_mm_interval_first(mm, end, U64_MAX);
863 		if (node->allocated) {
864 			pr_err("node after end: node=%llx+%llu, end=%llx\n",
865 			       node->start, node->size, end);
866 			return false;
867 		}
868 	}
869 
870 	return true;
871 }
872 
/* Very similar to __igt_insert(), but now instead of populating the
 * full range of the drm_mm, we try to fill a small portion of it:
 * only slots falling entirely within [start, end) are used.
 */
static int __igt_insert_range(unsigned int count, u64 size, u64 start, u64 end)
{
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct drm_mm_node *nodes, *node, *next;
	unsigned int n, start_n, end_n;
	int ret;

	DRM_MM_BUG_ON(!count);
	DRM_MM_BUG_ON(!size);
	DRM_MM_BUG_ON(end <= start);

	ret = -ENOMEM;
	nodes = vzalloc(count * sizeof(*nodes));
	if (!nodes)
		goto err;

	ret = -EINVAL;
	drm_mm_init(&mm, 0, count * size);

	/* First and last slot index wholly inside [start, end). */
	start_n = div64_u64(start + size - 1, size);
	end_n = div64_u64(end - size, size);

	for (mode = insert_modes; mode->name; mode++) {
		for (n = start_n; n <= end_n; n++) {
			if (!expect_insert_in_range(&mm, &nodes[n],
						    size, size, n,
						    start, end, mode)) {
				pr_err("%s insert failed, size %llu, step %d [%d, %d], range [%llx, %llx]\n",
				       mode->name, size, n,
				       start_n, end_n,
				       start, end);
				goto out;
			}
		}

		if (!assert_contiguous_in_range(&mm, size, start, end)) {
			pr_err("%s: range [%llx, %llx] not full after initialisation, size=%llu\n",
			       mode->name, start, end, size);
			goto out;
		}

		/* Remove one and reinsert, it should refill itself */
		for (n = start_n; n <= end_n; n++) {
			u64 addr = nodes[n].start;

			drm_mm_remove_node(&nodes[n]);
			if (!expect_insert_in_range(&mm, &nodes[n],
						    size, size, n,
						    start, end, mode)) {
				pr_err("%s reinsert failed, step %d\n", mode->name, n);
				goto out;
			}

			if (nodes[n].start != addr) {
				pr_err("%s reinsert node moved, step %d, expected %llx, found %llx\n",
				       mode->name, n, addr, nodes[n].start);
				goto out;
			}
		}

		if (!assert_contiguous_in_range(&mm, size, start, end)) {
			pr_err("%s: range [%llx, %llx] not full after reinsertion, size=%llu\n",
			       mode->name, start, end, size);
			goto out;
		}

		/* Start the next mode from an empty mm. */
		drm_mm_for_each_node_safe(node, next, &mm)
			drm_mm_remove_node(node);
		DRM_MM_BUG_ON(!drm_mm_clean(&mm));
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	vfree(nodes);
err:
	return ret;
}
958 
insert_outside_range(void)959 static int insert_outside_range(void)
960 {
961 	struct drm_mm mm;
962 	const unsigned int start = 1024;
963 	const unsigned int end = 2048;
964 	const unsigned int size = end - start;
965 
966 	drm_mm_init(&mm, start, size);
967 
968 	if (!expect_insert_in_range_fail(&mm, 1, 0, start))
969 		return -EINVAL;
970 
971 	if (!expect_insert_in_range_fail(&mm, size,
972 					 start - size/2, start + (size+1)/2))
973 		return -EINVAL;
974 
975 	if (!expect_insert_in_range_fail(&mm, size,
976 					 end - (size+1)/2, end + size/2))
977 		return -EINVAL;
978 
979 	if (!expect_insert_in_range_fail(&mm, 1, end, end + size))
980 		return -EINVAL;
981 
982 	drm_mm_takedown(&mm);
983 	return 0;
984 }
985 
igt_insert_range(void * ignored)986 static int igt_insert_range(void *ignored)
987 {
988 	const unsigned int count = min_t(unsigned int, BIT(13), max_iterations);
989 	unsigned int n;
990 	int ret;
991 
992 	/* Check that requests outside the bounds of drm_mm are rejected. */
993 	ret = insert_outside_range();
994 	if (ret)
995 		return ret;
996 
997 	for_each_prime_number_from(n, 1, 50) {
998 		const u64 size = BIT_ULL(n);
999 		const u64 max = count * size;
1000 
1001 		ret = __igt_insert_range(count, size, 0, max);
1002 		if (ret)
1003 			return ret;
1004 
1005 		ret = __igt_insert_range(count, size, 1, max);
1006 		if (ret)
1007 			return ret;
1008 
1009 		ret = __igt_insert_range(count, size, 0, max - 1);
1010 		if (ret)
1011 			return ret;
1012 
1013 		ret = __igt_insert_range(count, size, 0, max/2);
1014 		if (ret)
1015 			return ret;
1016 
1017 		ret = __igt_insert_range(count, size, max/2, max);
1018 		if (ret)
1019 			return ret;
1020 
1021 		ret = __igt_insert_range(count, size, max/4+1, 3*max/4-1);
1022 		if (ret)
1023 			return ret;
1024 
1025 		cond_resched();
1026 	}
1027 
1028 	return 0;
1029 }
1030 
igt_align(void * ignored)1031 static int igt_align(void *ignored)
1032 {
1033 	const struct insert_mode *mode;
1034 	const unsigned int max_count = min(8192u, max_prime);
1035 	struct drm_mm mm;
1036 	struct drm_mm_node *nodes, *node, *next;
1037 	unsigned int prime;
1038 	int ret = -EINVAL;
1039 
1040 	/* For each of the possible insertion modes, we pick a few
1041 	 * arbitrary alignments and check that the inserted node
1042 	 * meets our requirements.
1043 	 */
1044 
1045 	nodes = vzalloc(max_count * sizeof(*nodes));
1046 	if (!nodes)
1047 		goto err;
1048 
1049 	drm_mm_init(&mm, 1, U64_MAX - 2);
1050 
1051 	for (mode = insert_modes; mode->name; mode++) {
1052 		unsigned int i = 0;
1053 
1054 		for_each_prime_number_from(prime, 1, max_count) {
1055 			u64 size = next_prime_number(prime);
1056 
1057 			if (!expect_insert(&mm, &nodes[i],
1058 					   size, prime, i,
1059 					   mode)) {
1060 				pr_err("%s insert failed with alignment=%d",
1061 				       mode->name, prime);
1062 				goto out;
1063 			}
1064 
1065 			i++;
1066 		}
1067 
1068 		drm_mm_for_each_node_safe(node, next, &mm)
1069 			drm_mm_remove_node(node);
1070 		DRM_MM_BUG_ON(!drm_mm_clean(&mm));
1071 		cond_resched();
1072 	}
1073 
1074 	ret = 0;
1075 out:
1076 	drm_mm_for_each_node_safe(node, next, &mm)
1077 		drm_mm_remove_node(node);
1078 	drm_mm_takedown(&mm);
1079 	vfree(nodes);
1080 err:
1081 	return ret;
1082 }
1083 
igt_align_pot(int max)1084 static int igt_align_pot(int max)
1085 {
1086 	struct drm_mm mm;
1087 	struct drm_mm_node *node, *next;
1088 	int bit;
1089 	int ret = -EINVAL;
1090 
1091 	/* Check that we can align to the full u64 address space */
1092 
1093 	drm_mm_init(&mm, 1, U64_MAX - 2);
1094 
1095 	for (bit = max - 1; bit; bit--) {
1096 		u64 align, size;
1097 
1098 		node = kzalloc(sizeof(*node), GFP_KERNEL);
1099 		if (!node) {
1100 			ret = -ENOMEM;
1101 			goto out;
1102 		}
1103 
1104 		align = BIT_ULL(bit);
1105 		size = BIT_ULL(bit-1) + 1;
1106 		if (!expect_insert(&mm, node,
1107 				   size, align, bit,
1108 				   &insert_modes[0])) {
1109 			pr_err("insert failed with alignment=%llx [%d]",
1110 			       align, bit);
1111 			goto out;
1112 		}
1113 
1114 		cond_resched();
1115 	}
1116 
1117 	ret = 0;
1118 out:
1119 	drm_mm_for_each_node_safe(node, next, &mm) {
1120 		drm_mm_remove_node(node);
1121 		kfree(node);
1122 	}
1123 	drm_mm_takedown(&mm);
1124 	return ret;
1125 }
1126 
/* Power-of-two alignment coverage for a 32bit address space. */
static int igt_align32(void *ignored)
{
	return igt_align_pot(32);
}
1131 
/* Power-of-two alignment coverage for the full 64bit address space. */
static int igt_align64(void *ignored)
{
	return igt_align_pot(64);
}
1136 
show_scan(const struct drm_mm_scan * scan)1137 static void show_scan(const struct drm_mm_scan *scan)
1138 {
1139 	pr_info("scan: hit [%llx, %llx], size=%lld, align=%lld, color=%ld\n",
1140 		scan->hit_start, scan->hit_end,
1141 		scan->size, scan->alignment, scan->color);
1142 }
1143 
show_holes(const struct drm_mm * mm,int count)1144 static void show_holes(const struct drm_mm *mm, int count)
1145 {
1146 	u64 hole_start, hole_end;
1147 	struct drm_mm_node *hole;
1148 
1149 	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
1150 		struct drm_mm_node *next = list_next_entry(hole, node_list);
1151 		const char *node1 = NULL, *node2 = NULL;
1152 
1153 		if (hole->allocated)
1154 			node1 = kasprintf(GFP_KERNEL,
1155 					  "[%llx + %lld, color=%ld], ",
1156 					  hole->start, hole->size, hole->color);
1157 
1158 		if (next->allocated)
1159 			node2 = kasprintf(GFP_KERNEL,
1160 					  ", [%llx + %lld, color=%ld]",
1161 					  next->start, next->size, next->color);
1162 
1163 		pr_info("%sHole [%llx - %llx, size %lld]%s\n",
1164 			node1,
1165 			hole_start, hole_end, hole_end - hole_start,
1166 			node2);
1167 
1168 		kfree(node2);
1169 		kfree(node1);
1170 
1171 		if (!--count)
1172 			break;
1173 	}
1174 }
1175 
/*
 * A drm_mm_node with an embedded list link, so the eviction tests can
 * collect candidate victims on a local list while scanning.
 */
struct evict_node {
	struct drm_mm_node node;
	struct list_head link;
};
1180 
/*
 * Feed up to @count nodes (optionally shuffled via @order) into the eviction
 * scan until it reports a suitable hole, trim the candidate list down to the
 * blocks the scan actually selected, and remove those victims from the mm.
 *
 * Returns true if a non-empty eviction set was found, false otherwise.
 */
static bool evict_nodes(struct drm_mm_scan *scan,
			struct evict_node *nodes,
			unsigned int *order,
			unsigned int count,
			bool use_color,
			struct list_head *evict_list)
{
	struct evict_node *e, *en;
	unsigned int i;

	for (i = 0; i < count; i++) {
		e = &nodes[order ? order[i] : i];
		list_add(&e->link, evict_list);
		if (drm_mm_scan_add_block(scan, &e->node))
			break;
	}
	/* Drop the candidates the scan decided it does not need to evict. */
	list_for_each_entry_safe(e, en, evict_list, link) {
		if (!drm_mm_scan_remove_block(scan, &e->node))
			list_del(&e->link);
	}
	if (list_empty(evict_list)) {
		pr_err("Failed to find eviction: size=%lld [avail=%d], align=%lld (color=%lu)\n",
		       scan->size, count, scan->alignment, scan->color);
		return false;
	}

	list_for_each_entry(e, evict_list, link)
		drm_mm_remove_node(&e->node);

	if (use_color) {
		struct drm_mm_node *node;

		/* Collect any extra victims required by color adjustment. */
		while ((node = drm_mm_scan_color_evict(scan))) {
			e = container_of(node, typeof(*e), node);
			drm_mm_remove_node(&e->node);
			list_add(&e->link, evict_list);
		}
	} else {
		/* With no color constraint, no extra eviction is expected. */
		if (drm_mm_scan_color_evict(scan)) {
			pr_err("drm_mm_scan_color_evict unexpectedly reported overlapping nodes!\n");
			return false;
		}
	}

	return true;
}
1227 
/*
 * Run a scan over every node of a fully-populated mm and then abort it by
 * removing every block again, checking that a cancelled scan leaves the mm
 * intact: all nodes still allocated, still linked, and still continuous.
 */
static bool evict_nothing(struct drm_mm *mm,
			  unsigned int total_size,
			  struct evict_node *nodes)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	struct drm_mm_node *node;
	unsigned int n;

	drm_mm_scan_init(&scan, mm, 1, 0, 0, 0);
	for (n = 0; n < total_size; n++) {
		e = &nodes[n];
		list_add(&e->link, &evict_list);
		drm_mm_scan_add_block(&scan, &e->node);
	}
	/* Cancel the scan: remove every block again, evicting nothing. */
	list_for_each_entry(e, &evict_list, link)
		drm_mm_scan_remove_block(&scan, &e->node);

	for (n = 0; n < total_size; n++) {
		e = &nodes[n];

		if (!drm_mm_node_allocated(&e->node)) {
			pr_err("node[%d] no longer allocated!\n", n);
			return false;
		}

		/* Clear the marker; set again below for each node still
		 * reachable from the mm's node list.
		 */
		e->link.next = NULL;
	}

	drm_mm_for_each_node(node, mm) {
		e = container_of(node, typeof(*e), node);
		e->link.next = &e->link;
	}

	for (n = 0; n < total_size; n++) {
		e = &nodes[n];

		if (!e->link.next) {
			pr_err("node[%d] no longer connected!\n", n);
			return false;
		}
	}

	return assert_continuous(mm, nodes[0].node.size);
}
1274 
/*
 * Scan with a request covering the whole mm so that every node must be marked
 * for eviction, evict them all, check that a single hole now spans the range,
 * then reinsert each node at its old position via drm_mm_reserve_node().
 */
static bool evict_everything(struct drm_mm *mm,
			     unsigned int total_size,
			     struct evict_node *nodes)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	unsigned int n;
	int err;

	drm_mm_scan_init(&scan, mm, total_size, 0, 0, 0);
	for (n = 0; n < total_size; n++) {
		e = &nodes[n];
		list_add(&e->link, &evict_list);
		if (drm_mm_scan_add_block(&scan, &e->node))
			break;
	}

	/* Every node must be selected for eviction; report only the first
	 * miss to avoid flooding the log.
	 */
	err = 0;
	list_for_each_entry(e, &evict_list, link) {
		if (!drm_mm_scan_remove_block(&scan, &e->node)) {
			if (!err) {
				pr_err("Node %lld not marked for eviction!\n",
				       e->node.start);
				err = -EINVAL;
			}
		}
	}
	if (err)
		return false;

	list_for_each_entry(e, &evict_list, link)
		drm_mm_remove_node(&e->node);

	if (!assert_one_hole(mm, 0, total_size))
		return false;

	list_for_each_entry(e, &evict_list, link) {
		err = drm_mm_reserve_node(mm, &e->node);
		if (err) {
			pr_err("Failed to reinsert node after eviction: start=%llx\n",
			       e->node.start);
			return false;
		}
	}

	return assert_continuous(mm, nodes[0].node.size);
}
1323 
/*
 * Evict enough nodes to satisfy a size/alignment request restricted to
 * [range_start, range_end], insert a temporary node into the freed hole and
 * check it exactly fills it, then restore the mm to its fully-populated
 * state by reinserting the evicted nodes.
 *
 * Returns 0 on success or a negative error code.
 */
static int evict_something(struct drm_mm *mm,
			   u64 range_start, u64 range_end,
			   struct evict_node *nodes,
			   unsigned int *order,
			   unsigned int count,
			   unsigned int size,
			   unsigned int alignment,
			   const struct insert_mode *mode)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	struct drm_mm_node tmp;
	int err;

	drm_mm_scan_init_with_range(&scan, mm,
				    size, alignment, 0,
				    range_start, range_end,
				    mode->mode);
	if (!evict_nodes(&scan,
			 nodes, order, count, false,
			 &evict_list))
		return -EINVAL;

	memset(&tmp, 0, sizeof(tmp));
	err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, 0,
					 DRM_MM_INSERT_EVICT);
	if (err) {
		pr_err("Failed to insert into eviction hole: size=%d, align=%d\n",
		       size, alignment);
		show_scan(&scan);
		show_holes(mm, 3);
		return err;
	}

	/* The new node must land inside the requested range... */
	if (tmp.start < range_start || tmp.start + tmp.size > range_end) {
		pr_err("Inserted [address=%llu + %llu] did not fit into the request range [%llu, %llu]\n",
		       tmp.start, tmp.size, range_start, range_end);
		err = -EINVAL;
	}

	/* ...and exactly fill the hole we evicted for it. */
	if (!assert_node(&tmp, mm, size, alignment, 0) ||
	    drm_mm_hole_follows(&tmp)) {
		pr_err("Inserted did not fill the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx, hole-follows?=%d\n",
		       tmp.size, size,
		       alignment, misalignment(&tmp, alignment),
		       tmp.start, drm_mm_hole_follows(&tmp));
		err = -EINVAL;
	}

	drm_mm_remove_node(&tmp);
	if (err)
		return err;

	list_for_each_entry(e, &evict_list, link) {
		err = drm_mm_reserve_node(mm, &e->node);
		if (err) {
			pr_err("Failed to reinsert node after eviction: start=%llx\n",
			       e->node.start);
			return err;
		}
	}

	if (!assert_continuous(mm, nodes[0].node.size)) {
		pr_err("range is no longer continuous\n");
		return -EINVAL;
	}

	return 0;
}
1394 
static int igt_evict(void *ignored)
{
	DRM_RND_STATE(prng, random_seed);
	const unsigned int size = 8192;
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct evict_node *nodes;
	struct drm_mm_node *node, *next;
	unsigned int *order, n;
	int ret, err;

	/* Here we populate a full drm_mm and then try and insert a new node
	 * by evicting other nodes in a random order. The drm_mm_scan should
	 * pick the first matching hole it finds from the random list. We
	 * repeat that for different allocation strategies, alignments and
	 * sizes to try and stress the hole finder.
	 */

	ret = -ENOMEM;
	nodes = vzalloc(size * sizeof(*nodes));
	if (!nodes)
		goto err;

	order = drm_random_order(size, &prng);
	if (!order)
		goto err_nodes;

	ret = -EINVAL;
	drm_mm_init(&mm, 0, size);
	for (n = 0; n < size; n++) {
		err = drm_mm_insert_node(&mm, &nodes[n].node, 1);
		if (err) {
			pr_err("insert failed, step %d\n", n);
			ret = err;
			goto out;
		}
	}

	/* First check that using the scanner doesn't break the mm */
	if (!evict_nothing(&mm, size, nodes)) {
		pr_err("evict_nothing() failed\n");
		goto out;
	}
	if (!evict_everything(&mm, size, nodes)) {
		pr_err("evict_everything() failed\n");
		goto out;
	}

	for (mode = evict_modes; mode->name; mode++) {
		/* Power-of-two request sizes, no alignment constraint. */
		for (n = 1; n <= size; n <<= 1) {
			drm_random_reorder(order, size, &prng);
			err = evict_something(&mm, 0, U64_MAX,
					      nodes, order, size,
					      n, 1,
					      mode);
			if (err) {
				pr_err("%s evict_something(size=%u) failed\n",
				       mode->name, n);
				ret = err;
				goto out;
			}
		}

		/* Fixed size, power-of-two alignments. */
		for (n = 1; n < size; n <<= 1) {
			drm_random_reorder(order, size, &prng);
			err = evict_something(&mm, 0, U64_MAX,
					      nodes, order, size,
					      size/2, n,
					      mode);
			if (err) {
				pr_err("%s evict_something(size=%u, alignment=%u) failed\n",
				       mode->name, size/2, n);
				ret = err;
				goto out;
			}
		}

		/* Prime alignments to hit the awkward non-pot cases. */
		for_each_prime_number_from(n, 1, min(size, max_prime)) {
			unsigned int nsize = (size - n + 1) / 2;

			DRM_MM_BUG_ON(!nsize);

			drm_random_reorder(order, size, &prng);
			err = evict_something(&mm, 0, U64_MAX,
					      nodes, order, size,
					      nsize, n,
					      mode);
			if (err) {
				pr_err("%s evict_something(size=%u, alignment=%u) failed\n",
				       mode->name, nsize, n);
				ret = err;
				goto out;
			}
		}

		cond_resched();
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_nodes:
	vfree(nodes);
err:
	return ret;
}
1504 
igt_evict_range(void * ignored)1505 static int igt_evict_range(void *ignored)
1506 {
1507 	DRM_RND_STATE(prng, random_seed);
1508 	const unsigned int size = 8192;
1509 	const unsigned int range_size = size / 2;
1510 	const unsigned int range_start = size / 4;
1511 	const unsigned int range_end = range_start + range_size;
1512 	const struct insert_mode *mode;
1513 	struct drm_mm mm;
1514 	struct evict_node *nodes;
1515 	struct drm_mm_node *node, *next;
1516 	unsigned int *order, n;
1517 	int ret, err;
1518 
1519 	/* Like igt_evict() but now we are limiting the search to a
1520 	 * small portion of the full drm_mm.
1521 	 */
1522 
1523 	ret = -ENOMEM;
1524 	nodes = vzalloc(size * sizeof(*nodes));
1525 	if (!nodes)
1526 		goto err;
1527 
1528 	order = drm_random_order(size, &prng);
1529 	if (!order)
1530 		goto err_nodes;
1531 
1532 	ret = -EINVAL;
1533 	drm_mm_init(&mm, 0, size);
1534 	for (n = 0; n < size; n++) {
1535 		err = drm_mm_insert_node(&mm, &nodes[n].node, 1);
1536 		if (err) {
1537 			pr_err("insert failed, step %d\n", n);
1538 			ret = err;
1539 			goto out;
1540 		}
1541 	}
1542 
1543 	for (mode = evict_modes; mode->name; mode++) {
1544 		for (n = 1; n <= range_size; n <<= 1) {
1545 			drm_random_reorder(order, size, &prng);
1546 			err = evict_something(&mm, range_start, range_end,
1547 					      nodes, order, size,
1548 					      n, 1,
1549 					      mode);
1550 			if (err) {
1551 				pr_err("%s evict_something(size=%u) failed with range [%u, %u]\n",
1552 				       mode->name, n, range_start, range_end);
1553 				goto out;
1554 			}
1555 		}
1556 
1557 		for (n = 1; n <= range_size; n <<= 1) {
1558 			drm_random_reorder(order, size, &prng);
1559 			err = evict_something(&mm, range_start, range_end,
1560 					      nodes, order, size,
1561 					      range_size/2, n,
1562 					      mode);
1563 			if (err) {
1564 				pr_err("%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n",
1565 				       mode->name, range_size/2, n, range_start, range_end);
1566 				goto out;
1567 			}
1568 		}
1569 
1570 		for_each_prime_number_from(n, 1, min(range_size, max_prime)) {
1571 			unsigned int nsize = (range_size - n + 1) / 2;
1572 
1573 			DRM_MM_BUG_ON(!nsize);
1574 
1575 			drm_random_reorder(order, size, &prng);
1576 			err = evict_something(&mm, range_start, range_end,
1577 					      nodes, order, size,
1578 					      nsize, n,
1579 					      mode);
1580 			if (err) {
1581 				pr_err("%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n",
1582 				       mode->name, nsize, n, range_start, range_end);
1583 				goto out;
1584 			}
1585 		}
1586 
1587 		cond_resched();
1588 	}
1589 
1590 	ret = 0;
1591 out:
1592 	drm_mm_for_each_node_safe(node, next, &mm)
1593 		drm_mm_remove_node(node);
1594 	drm_mm_takedown(&mm);
1595 	kfree(order);
1596 err_nodes:
1597 	vfree(nodes);
1598 err:
1599 	return ret;
1600 }
1601 
/*
 * Within a single test pass every node has the same size, so dividing a
 * node's start offset by its size yields its slot index in the mm.
 */
static unsigned int node_index(const struct drm_mm_node *node)
{
	return div64_u64(node->start, node->size);
}
1606 
igt_topdown(void * ignored)1607 static int igt_topdown(void *ignored)
1608 {
1609 	const struct insert_mode *topdown = &insert_modes[TOPDOWN];
1610 	DRM_RND_STATE(prng, random_seed);
1611 	const unsigned int count = 8192;
1612 	unsigned int size;
1613 	unsigned long *bitmap = NULL;
1614 	struct drm_mm mm;
1615 	struct drm_mm_node *nodes, *node, *next;
1616 	unsigned int *order, n, m, o = 0;
1617 	int ret;
1618 
1619 	/* When allocating top-down, we expect to be returned a node
1620 	 * from a suitable hole at the top of the drm_mm. We check that
1621 	 * the returned node does match the highest available slot.
1622 	 */
1623 
1624 	ret = -ENOMEM;
1625 	nodes = vzalloc(count * sizeof(*nodes));
1626 	if (!nodes)
1627 		goto err;
1628 
1629 	bitmap = kzalloc(count / BITS_PER_LONG * sizeof(unsigned long),
1630 			 GFP_KERNEL);
1631 	if (!bitmap)
1632 		goto err_nodes;
1633 
1634 	order = drm_random_order(count, &prng);
1635 	if (!order)
1636 		goto err_bitmap;
1637 
1638 	ret = -EINVAL;
1639 	for (size = 1; size <= 64; size <<= 1) {
1640 		drm_mm_init(&mm, 0, size*count);
1641 		for (n = 0; n < count; n++) {
1642 			if (!expect_insert(&mm, &nodes[n],
1643 					   size, 0, n,
1644 					   topdown)) {
1645 				pr_err("insert failed, size %u step %d\n", size, n);
1646 				goto out;
1647 			}
1648 
1649 			if (drm_mm_hole_follows(&nodes[n])) {
1650 				pr_err("hole after topdown insert %d, start=%llx\n, size=%u",
1651 				       n, nodes[n].start, size);
1652 				goto out;
1653 			}
1654 
1655 			if (!assert_one_hole(&mm, 0, size*(count - n - 1)))
1656 				goto out;
1657 		}
1658 
1659 		if (!assert_continuous(&mm, size))
1660 			goto out;
1661 
1662 		drm_random_reorder(order, count, &prng);
1663 		for_each_prime_number_from(n, 1, min(count, max_prime)) {
1664 			for (m = 0; m < n; m++) {
1665 				node = &nodes[order[(o + m) % count]];
1666 				drm_mm_remove_node(node);
1667 				__set_bit(node_index(node), bitmap);
1668 			}
1669 
1670 			for (m = 0; m < n; m++) {
1671 				unsigned int last;
1672 
1673 				node = &nodes[order[(o + m) % count]];
1674 				if (!expect_insert(&mm, node,
1675 						   size, 0, 0,
1676 						   topdown)) {
1677 					pr_err("insert failed, step %d/%d\n", m, n);
1678 					goto out;
1679 				}
1680 
1681 				if (drm_mm_hole_follows(node)) {
1682 					pr_err("hole after topdown insert %d/%d, start=%llx\n",
1683 					       m, n, node->start);
1684 					goto out;
1685 				}
1686 
1687 				last = find_last_bit(bitmap, count);
1688 				if (node_index(node) != last) {
1689 					pr_err("node %d/%d, size %d, not inserted into upmost hole, expected %d, found %d\n",
1690 					       m, n, size, last, node_index(node));
1691 					goto out;
1692 				}
1693 
1694 				__clear_bit(last, bitmap);
1695 			}
1696 
1697 			DRM_MM_BUG_ON(find_first_bit(bitmap, count) != count);
1698 
1699 			o += n;
1700 		}
1701 
1702 		drm_mm_for_each_node_safe(node, next, &mm)
1703 			drm_mm_remove_node(node);
1704 		DRM_MM_BUG_ON(!drm_mm_clean(&mm));
1705 		cond_resched();
1706 	}
1707 
1708 	ret = 0;
1709 out:
1710 	drm_mm_for_each_node_safe(node, next, &mm)
1711 		drm_mm_remove_node(node);
1712 	drm_mm_takedown(&mm);
1713 	kfree(order);
1714 err_bitmap:
1715 	kfree(bitmap);
1716 err_nodes:
1717 	vfree(nodes);
1718 err:
1719 	return ret;
1720 }
1721 
igt_bottomup(void * ignored)1722 static int igt_bottomup(void *ignored)
1723 {
1724 	const struct insert_mode *bottomup = &insert_modes[BOTTOMUP];
1725 	DRM_RND_STATE(prng, random_seed);
1726 	const unsigned int count = 8192;
1727 	unsigned int size;
1728 	unsigned long *bitmap;
1729 	struct drm_mm mm;
1730 	struct drm_mm_node *nodes, *node, *next;
1731 	unsigned int *order, n, m, o = 0;
1732 	int ret;
1733 
1734 	/* Like igt_topdown, but instead of searching for the last hole,
1735 	 * we search for the first.
1736 	 */
1737 
1738 	ret = -ENOMEM;
1739 	nodes = vzalloc(count * sizeof(*nodes));
1740 	if (!nodes)
1741 		goto err;
1742 
1743 	bitmap = kzalloc(count / BITS_PER_LONG * sizeof(unsigned long),
1744 			 GFP_KERNEL);
1745 	if (!bitmap)
1746 		goto err_nodes;
1747 
1748 	order = drm_random_order(count, &prng);
1749 	if (!order)
1750 		goto err_bitmap;
1751 
1752 	ret = -EINVAL;
1753 	for (size = 1; size <= 64; size <<= 1) {
1754 		drm_mm_init(&mm, 0, size*count);
1755 		for (n = 0; n < count; n++) {
1756 			if (!expect_insert(&mm, &nodes[n],
1757 					   size, 0, n,
1758 					   bottomup)) {
1759 				pr_err("bottomup insert failed, size %u step %d\n", size, n);
1760 				goto out;
1761 			}
1762 
1763 			if (!assert_one_hole(&mm, size*(n + 1), size*count))
1764 				goto out;
1765 		}
1766 
1767 		if (!assert_continuous(&mm, size))
1768 			goto out;
1769 
1770 		drm_random_reorder(order, count, &prng);
1771 		for_each_prime_number_from(n, 1, min(count, max_prime)) {
1772 			for (m = 0; m < n; m++) {
1773 				node = &nodes[order[(o + m) % count]];
1774 				drm_mm_remove_node(node);
1775 				__set_bit(node_index(node), bitmap);
1776 			}
1777 
1778 			for (m = 0; m < n; m++) {
1779 				unsigned int first;
1780 
1781 				node = &nodes[order[(o + m) % count]];
1782 				if (!expect_insert(&mm, node,
1783 						   size, 0, 0,
1784 						   bottomup)) {
1785 					pr_err("insert failed, step %d/%d\n", m, n);
1786 					goto out;
1787 				}
1788 
1789 				first = find_first_bit(bitmap, count);
1790 				if (node_index(node) != first) {
1791 					pr_err("node %d/%d not inserted into bottom hole, expected %d, found %d\n",
1792 					       m, n, first, node_index(node));
1793 					goto out;
1794 				}
1795 				__clear_bit(first, bitmap);
1796 			}
1797 
1798 			DRM_MM_BUG_ON(find_first_bit(bitmap, count) != count);
1799 
1800 			o += n;
1801 		}
1802 
1803 		drm_mm_for_each_node_safe(node, next, &mm)
1804 			drm_mm_remove_node(node);
1805 		DRM_MM_BUG_ON(!drm_mm_clean(&mm));
1806 		cond_resched();
1807 	}
1808 
1809 	ret = 0;
1810 out:
1811 	drm_mm_for_each_node_safe(node, next, &mm)
1812 		drm_mm_remove_node(node);
1813 	drm_mm_takedown(&mm);
1814 	kfree(order);
1815 err_bitmap:
1816 	kfree(bitmap);
1817 err_nodes:
1818 	vfree(nodes);
1819 err:
1820 	return ret;
1821 }
1822 
/*
 * color_adjust callback for the color tests: shrink the usable hole by one
 * unit at either end whenever the neighbouring allocated node has a
 * different color, forcing a one-unit gap between differently-colored nodes.
 */
static void separate_adjacent_colors(const struct drm_mm_node *node,
				     unsigned long color,
				     u64 *start,
				     u64 *end)
{
	if (node->allocated && node->color != color)
		++*start;

	node = list_next_entry(node, node_list);
	if (node->allocated && node->color != color)
		--*end;
}
1835 
/*
 * Check that @node is separated from its successor: with the
 * separate_adjacent_colors callback in place, two allocated nodes must
 * never touch. Returns true (and logs) if they do.
 */
static bool colors_abutt(const struct drm_mm_node *node)
{
	if (!drm_mm_hole_follows(node) &&
	    list_next_entry(node, node_list)->allocated) {
		pr_err("colors abutt; %ld [%llx + %llx] is next to %ld [%llx + %llx]!\n",
		       node->color, node->start, node->size,
		       list_next_entry(node, node_list)->color,
		       list_next_entry(node, node_list)->start,
		       list_next_entry(node, node_list)->size);
		return true;
	}

	return false;
}
1850 
static int igt_color(void *ignored)
{
	const unsigned int count = min(4096u, max_iterations);
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct drm_mm_node *node, *nn;
	unsigned int n;
	int ret = -EINVAL, err;

	/* Color adjustment complicates everything. First we just check
	 * that when we insert a node we apply any color_adjustment callback.
	 * The callback we use should ensure that there is a gap between
	 * any two nodes, and so after each insertion we check that those
	 * holes are inserted and that they are preserved.
	 */

	drm_mm_init(&mm, 0, U64_MAX);

	for (n = 1; n <= count; n++) {
		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node) {
			ret = -ENOMEM;
			goto out;
		}

		if (!expect_insert(&mm, node,
				   n, 0, n,
				   &insert_modes[0])) {
			pr_err("insert failed, step %d\n", n);
			kfree(node);
			goto out;
		}
	}

	/* Without a color_adjust callback the color is stored verbatim. */
	drm_mm_for_each_node_safe(node, nn, &mm) {
		if (node->color != node->size) {
			pr_err("invalid color stored: expected %lld, found %ld\n",
			       node->size, node->color);

			goto out;
		}

		drm_mm_remove_node(node);
		kfree(node);
	}

	/* Now, let's start experimenting with applying a color callback */
	mm.color_adjust = separate_adjacent_colors;
	for (mode = insert_modes; mode->name; mode++) {
		u64 last;

		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node) {
			ret = -ENOMEM;
			goto out;
		}

		node->size = 1 + 2*count;
		node->color = node->size;

		err = drm_mm_reserve_node(&mm, node);
		if (err) {
			pr_err("initial reserve failed!\n");
			ret = err;
			goto out;
		}

		last = node->start + node->size;

		for (n = 1; n <= count; n++) {
			int rem;

			node = kzalloc(sizeof(*node), GFP_KERNEL);
			if (!node) {
				ret = -ENOMEM;
				goto out;
			}

			/* Reserving directly abutting the previous node with
			 * a different color must be rejected...
			 */
			node->start = last;
			node->size = n + count;
			node->color = node->size;

			err = drm_mm_reserve_node(&mm, node);
			if (err != -ENOSPC) {
				pr_err("reserve %d did not report color overlap! err=%d\n",
				       n, err);
				goto out;
			}

			/* ...but after leaving a gap (and aligning) it fits. */
			node->start += n + 1;
			rem = misalignment(node, n + count);
			node->start += n + count - rem;

			err = drm_mm_reserve_node(&mm, node);
			if (err) {
				pr_err("reserve %d failed, err=%d\n", n, err);
				ret = err;
				goto out;
			}

			last = node->start + node->size;
		}

		for (n = 1; n <= count; n++) {
			node = kzalloc(sizeof(*node), GFP_KERNEL);
			if (!node) {
				ret = -ENOMEM;
				goto out;
			}

			if (!expect_insert(&mm, node,
					   n, n, n,
					   mode)) {
				pr_err("%s insert failed, step %d\n",
				       mode->name, n);
				kfree(node);
				goto out;
			}
		}

		drm_mm_for_each_node_safe(node, nn, &mm) {
			u64 rem;

			if (node->color != node->size) {
				pr_err("%s invalid color stored: expected %lld, found %ld\n",
				       mode->name, node->size, node->color);

				goto out;
			}

			if (colors_abutt(node))
				goto out;

			div64_u64_rem(node->start, node->size, &rem);
			if (rem) {
				pr_err("%s colored node misaligned, start=%llx expected alignment=%lld [rem=%lld]\n",
				       mode->name, node->start, node->size, rem);
				goto out;
			}

			drm_mm_remove_node(node);
			kfree(node);
		}

		cond_resched();
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, nn, &mm) {
		drm_mm_remove_node(node);
		kfree(node);
	}
	drm_mm_takedown(&mm);
	return ret;
}
2007 
/*
 * Like evict_something(), but with a color constraint: evict victims to make
 * room for a size/alignment/color request inside [range_start, range_end],
 * insert a temporary node into the freed hole and verify both the fit and
 * the color separation, then restore the mm to its original layout.
 *
 * Returns 0 on success or a negative error code.
 */
static int evict_color(struct drm_mm *mm,
		       u64 range_start, u64 range_end,
		       struct evict_node *nodes,
		       unsigned int *order,
		       unsigned int count,
		       unsigned int size,
		       unsigned int alignment,
		       unsigned long color,
		       const struct insert_mode *mode)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	struct drm_mm_node tmp;
	int err;

	drm_mm_scan_init_with_range(&scan, mm,
				    size, alignment, color,
				    range_start, range_end,
				    mode->mode);
	if (!evict_nodes(&scan,
			 nodes, order, count, true,
			 &evict_list))
		return -EINVAL;

	memset(&tmp, 0, sizeof(tmp));
	err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, color,
					 DRM_MM_INSERT_EVICT);
	if (err) {
		pr_err("Failed to insert into eviction hole: size=%d, align=%d, color=%lu, err=%d\n",
		       size, alignment, color, err);
		show_scan(&scan);
		show_holes(mm, 3);
		return err;
	}

	/* The new node must land inside the requested range. */
	if (tmp.start < range_start || tmp.start + tmp.size > range_end) {
		pr_err("Inserted [address=%llu + %llu] did not fit into the request range [%llu, %llu]\n",
		       tmp.start, tmp.size, range_start, range_end);
		err = -EINVAL;
	}

	/* The color callback must have kept a gap to its neighbours. */
	if (colors_abutt(&tmp))
		err = -EINVAL;

	if (!assert_node(&tmp, mm, size, alignment, color)) {
		pr_err("Inserted did not fit the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx\n",
		       tmp.size, size,
		       alignment, misalignment(&tmp, alignment), tmp.start);
		err = -EINVAL;
	}

	drm_mm_remove_node(&tmp);
	if (err)
		return err;

	list_for_each_entry(e, &evict_list, link) {
		err = drm_mm_reserve_node(mm, &e->node);
		if (err) {
			pr_err("Failed to reinsert node after eviction: start=%llx\n",
			       e->node.start);
			return err;
		}
	}

	cond_resched();
	return 0;
}
2076 
static int igt_color_evict(void *ignored)
{
	DRM_RND_STATE(prng, random_seed);
	const unsigned int total_size = min(8192u, max_iterations);
	const struct insert_mode *mode;
	unsigned long color = 0;
	struct drm_mm mm;
	struct evict_node *nodes;
	struct drm_mm_node *node, *next;
	unsigned int *order, n;
	int ret, err;

	/* Check that the drm_mm_scan also honours color adjustment when
	 * choosing its victims to create a hole. Our color_adjust does not
	 * allow two nodes to be placed together without an intervening hole
	 * enlarging the set of victims that must be evicted.
	 */

	ret = -ENOMEM;
	nodes = vzalloc(total_size * sizeof(*nodes));
	if (!nodes)
		goto err;

	order = drm_random_order(total_size, &prng);
	if (!order)
		goto err_nodes;

	ret = -EINVAL;
	/* 2*total_size - 1 leaves exactly room for the one-unit color gaps. */
	drm_mm_init(&mm, 0, 2*total_size - 1);
	mm.color_adjust = separate_adjacent_colors;
	for (n = 0; n < total_size; n++) {
		if (!expect_insert(&mm, &nodes[n].node,
				   1, 0, color++,
				   &insert_modes[0])) {
			pr_err("insert failed, step %d\n", n);
			goto out;
		}
	}

	/* NOTE(review): on evict_color() failure ret stays -EINVAL rather
	 * than propagating err, unlike igt_evict() — consider aligning.
	 */
	for (mode = evict_modes; mode->name; mode++) {
		for (n = 1; n <= total_size; n <<= 1) {
			drm_random_reorder(order, total_size, &prng);
			err = evict_color(&mm, 0, U64_MAX,
					  nodes, order, total_size,
					  n, 1, color++,
					  mode);
			if (err) {
				pr_err("%s evict_color(size=%u) failed\n",
				       mode->name, n);
				goto out;
			}
		}

		for (n = 1; n < total_size; n <<= 1) {
			drm_random_reorder(order, total_size, &prng);
			err = evict_color(&mm, 0, U64_MAX,
					  nodes, order, total_size,
					  total_size/2, n, color++,
					  mode);
			if (err) {
				pr_err("%s evict_color(size=%u, alignment=%u) failed\n",
				       mode->name, total_size/2, n);
				goto out;
			}
		}

		for_each_prime_number_from(n, 1, min(total_size, max_prime)) {
			unsigned int nsize = (total_size - n + 1) / 2;

			DRM_MM_BUG_ON(!nsize);

			drm_random_reorder(order, total_size, &prng);
			err = evict_color(&mm, 0, U64_MAX,
					  nodes, order, total_size,
					  nsize, n, color++,
					  mode);
			if (err) {
				pr_err("%s evict_color(size=%u, alignment=%u) failed\n",
				       mode->name, nsize, n);
				goto out;
			}
		}

		cond_resched();
	}

	ret = 0;
out:
	if (ret)
		show_mm(&mm);
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_nodes:
	vfree(nodes);
err:
	return ret;
}
2176 
igt_color_evict_range(void * ignored)2177 static int igt_color_evict_range(void *ignored)
2178 {
2179 	DRM_RND_STATE(prng, random_seed);
2180 	const unsigned int total_size = 8192;
2181 	const unsigned int range_size = total_size / 2;
2182 	const unsigned int range_start = total_size / 4;
2183 	const unsigned int range_end = range_start + range_size;
2184 	const struct insert_mode *mode;
2185 	unsigned long color = 0;
2186 	struct drm_mm mm;
2187 	struct evict_node *nodes;
2188 	struct drm_mm_node *node, *next;
2189 	unsigned int *order, n;
2190 	int ret, err;
2191 
2192 	/* Like igt_color_evict(), but limited to small portion of the full
2193 	 * drm_mm range.
2194 	 */
2195 
2196 	ret = -ENOMEM;
2197 	nodes = vzalloc(total_size * sizeof(*nodes));
2198 	if (!nodes)
2199 		goto err;
2200 
2201 	order = drm_random_order(total_size, &prng);
2202 	if (!order)
2203 		goto err_nodes;
2204 
2205 	ret = -EINVAL;
2206 	drm_mm_init(&mm, 0, 2*total_size - 1);
2207 	mm.color_adjust = separate_adjacent_colors;
2208 	for (n = 0; n < total_size; n++) {
2209 		if (!expect_insert(&mm, &nodes[n].node,
2210 				   1, 0, color++,
2211 				   &insert_modes[0])) {
2212 			pr_err("insert failed, step %d\n", n);
2213 			goto out;
2214 		}
2215 	}
2216 
2217 	for (mode = evict_modes; mode->name; mode++) {
2218 		for (n = 1; n <= range_size; n <<= 1) {
2219 			drm_random_reorder(order, range_size, &prng);
2220 			err = evict_color(&mm, range_start, range_end,
2221 					  nodes, order, total_size,
2222 					  n, 1, color++,
2223 					  mode);
2224 			if (err) {
2225 				pr_err("%s evict_color(size=%u) failed for range [%x, %x]\n",
2226 				       mode->name, n, range_start, range_end);
2227 				goto out;
2228 			}
2229 		}
2230 
2231 		for (n = 1; n < range_size; n <<= 1) {
2232 			drm_random_reorder(order, total_size, &prng);
2233 			err = evict_color(&mm, range_start, range_end,
2234 					  nodes, order, total_size,
2235 					  range_size/2, n, color++,
2236 					  mode);
2237 			if (err) {
2238 				pr_err("%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n",
2239 				       mode->name, total_size/2, n, range_start, range_end);
2240 				goto out;
2241 			}
2242 		}
2243 
2244 		for_each_prime_number_from(n, 1, min(range_size, max_prime)) {
2245 			unsigned int nsize = (range_size - n + 1) / 2;
2246 
2247 			DRM_MM_BUG_ON(!nsize);
2248 
2249 			drm_random_reorder(order, total_size, &prng);
2250 			err = evict_color(&mm, range_start, range_end,
2251 					  nodes, order, total_size,
2252 					  nsize, n, color++,
2253 					  mode);
2254 			if (err) {
2255 				pr_err("%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n",
2256 				       mode->name, nsize, n, range_start, range_end);
2257 				goto out;
2258 			}
2259 		}
2260 
2261 		cond_resched();
2262 	}
2263 
2264 	ret = 0;
2265 out:
2266 	if (ret)
2267 		show_mm(&mm);
2268 	drm_mm_for_each_node_safe(node, next, &mm)
2269 		drm_mm_remove_node(node);
2270 	drm_mm_takedown(&mm);
2271 	kfree(order);
2272 err_nodes:
2273 	vfree(nodes);
2274 err:
2275 	return ret;
2276 }
2277 
2278 #include "drm_selftest.c"
2279 
test_drm_mm_init(void)2280 static int __init test_drm_mm_init(void)
2281 {
2282 	int err;
2283 
2284 	while (!random_seed)
2285 		random_seed = get_random_int();
2286 
2287 	pr_info("Testing DRM range manger (struct drm_mm), with random_seed=0x%x max_iterations=%u max_prime=%u\n",
2288 		random_seed, max_iterations, max_prime);
2289 	err = run_selftests(selftests, ARRAY_SIZE(selftests), NULL);
2290 
2291 	return err > 0 ? 0 : err;
2292 }
2293 
/* Nothing to clean up; each subtest tears down its own state. */
static void __exit test_drm_mm_exit(void)
{
}
2297 
module_init(test_drm_mm_init);
module_exit(test_drm_mm_exit);

/* Tunables, set at module load time; 0400 = readable by root only. */
module_param(random_seed, uint, 0400);
module_param(max_iterations, uint, 0400);
module_param(max_prime, uint, 0400);

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");
2307