1 // SPDX-License-Identifier: MIT
2 /*
3 * Copyright © 2019 Intel Corporation
4 */
5
6 #include <linux/prime_numbers.h>
7
8 #include "../i915_selftest.h"
9 #include "i915_random.h"
10
/* Dump the raw state of @block for post-mortem debugging; @buddy flags
 * whether this is the buddy of the block that originally failed a check.
 */
static void __igt_dump_block(struct i915_buddy_mm *mm,
			     struct i915_buddy_block *block,
			     bool buddy)
{
	pr_err("block info: header=%llx, state=%u, order=%d, offset=%llx size=%llx root=%s buddy=%s\n",
	       block->header,
	       i915_buddy_block_state(block),
	       i915_buddy_block_order(block),
	       i915_buddy_block_offset(block),
	       i915_buddy_block_size(mm, block),
	       yesno(!block->parent),
	       yesno(buddy));
}
24
igt_dump_block(struct i915_buddy_mm * mm,struct i915_buddy_block * block)25 static void igt_dump_block(struct i915_buddy_mm *mm,
26 struct i915_buddy_block *block)
27 {
28 struct i915_buddy_block *buddy;
29
30 __igt_dump_block(mm, block, false);
31
32 buddy = get_buddy(block);
33 if (buddy)
34 __igt_dump_block(mm, buddy, true);
35 }
36
igt_check_block(struct i915_buddy_mm * mm,struct i915_buddy_block * block)37 static int igt_check_block(struct i915_buddy_mm *mm,
38 struct i915_buddy_block *block)
39 {
40 struct i915_buddy_block *buddy;
41 unsigned int block_state;
42 u64 block_size;
43 u64 offset;
44 int err = 0;
45
46 block_state = i915_buddy_block_state(block);
47
48 if (block_state != I915_BUDDY_ALLOCATED &&
49 block_state != I915_BUDDY_FREE &&
50 block_state != I915_BUDDY_SPLIT) {
51 pr_err("block state mismatch\n");
52 err = -EINVAL;
53 }
54
55 block_size = i915_buddy_block_size(mm, block);
56 offset = i915_buddy_block_offset(block);
57
58 if (block_size < mm->chunk_size) {
59 pr_err("block size smaller than min size\n");
60 err = -EINVAL;
61 }
62
63 if (!is_power_of_2(block_size)) {
64 pr_err("block size not power of two\n");
65 err = -EINVAL;
66 }
67
68 if (!IS_ALIGNED(block_size, mm->chunk_size)) {
69 pr_err("block size not aligned to min size\n");
70 err = -EINVAL;
71 }
72
73 if (!IS_ALIGNED(offset, mm->chunk_size)) {
74 pr_err("block offset not aligned to min size\n");
75 err = -EINVAL;
76 }
77
78 if (!IS_ALIGNED(offset, block_size)) {
79 pr_err("block offset not aligned to block size\n");
80 err = -EINVAL;
81 }
82
83 buddy = get_buddy(block);
84
85 if (!buddy && block->parent) {
86 pr_err("buddy has gone fishing\n");
87 err = -EINVAL;
88 }
89
90 if (buddy) {
91 if (i915_buddy_block_offset(buddy) != (offset ^ block_size)) {
92 pr_err("buddy has wrong offset\n");
93 err = -EINVAL;
94 }
95
96 if (i915_buddy_block_size(mm, buddy) != block_size) {
97 pr_err("buddy size mismatch\n");
98 err = -EINVAL;
99 }
100
101 if (i915_buddy_block_state(buddy) == block_state &&
102 block_state == I915_BUDDY_FREE) {
103 pr_err("block and its buddy are free\n");
104 err = -EINVAL;
105 }
106 }
107
108 return err;
109 }
110
/*
 * Validate a list of allocated blocks: each must pass igt_check_block()
 * and be in the ALLOCATED state; if @is_contiguous, consecutive blocks
 * must tile the address space back-to-back; the total size must equal
 * @expected_size. On the first failure the offending block (and its
 * predecessor, for context) are dumped.
 *
 * Returns 0 on success, -EINVAL on any violation.
 */
static int igt_check_blocks(struct i915_buddy_mm *mm,
			    struct list_head *blocks,
			    u64 expected_size,
			    bool is_contiguous)
{
	struct i915_buddy_block *block;
	struct i915_buddy_block *prev;
	u64 total;
	int err = 0;

	block = NULL;
	prev = NULL;
	total = 0;

	list_for_each_entry(block, blocks, link) {
		err = igt_check_block(mm, block);

		if (!i915_buddy_block_is_allocated(block)) {
			pr_err("block not allocated\n"); /* fixed stray ',' */
			err = -EINVAL;
		}

		if (is_contiguous && prev) {
			u64 prev_block_size;
			u64 prev_offset;
			u64 offset;

			prev_offset = i915_buddy_block_offset(prev);
			prev_block_size = i915_buddy_block_size(mm, prev);
			offset = i915_buddy_block_offset(block);

			/* Each block must start where the previous ended. */
			if (offset != (prev_offset + prev_block_size)) {
				pr_err("block offset mismatch\n");
				err = -EINVAL;
			}
		}

		if (err)
			break;

		total += i915_buddy_block_size(mm, block);
		prev = block;
	}

	if (!err) {
		if (total != expected_size) {
			pr_err("size mismatch, expected=%llx, found=%llx\n",
			       expected_size, total);
			err = -EINVAL;
		}
		return err;
	}

	/* Failure path: dump the bad block and its predecessor. */
	if (prev) {
		pr_err("prev block, dump:\n");
		igt_dump_block(mm, prev);
	}

	if (block) {
		pr_err("bad block, dump:\n");
		igt_dump_block(mm, block);
	}

	return err;
}
176
igt_check_mm(struct i915_buddy_mm * mm)177 static int igt_check_mm(struct i915_buddy_mm *mm)
178 {
179 struct i915_buddy_block *root;
180 struct i915_buddy_block *prev;
181 unsigned int i;
182 u64 total;
183 int err = 0;
184
185 if (!mm->n_roots) {
186 pr_err("n_roots is zero\n");
187 return -EINVAL;
188 }
189
190 if (mm->n_roots != hweight64(mm->size)) {
191 pr_err("n_roots mismatch, n_roots=%u, expected=%lu\n",
192 mm->n_roots, hweight64(mm->size));
193 return -EINVAL;
194 }
195
196 root = NULL;
197 prev = NULL;
198 total = 0;
199
200 for (i = 0; i < mm->n_roots; ++i) {
201 struct i915_buddy_block *block;
202 unsigned int order;
203
204 root = mm->roots[i];
205 if (!root) {
206 pr_err("root(%u) is NULL\n", i);
207 err = -EINVAL;
208 break;
209 }
210
211 err = igt_check_block(mm, root);
212
213 if (!i915_buddy_block_is_free(root)) {
214 pr_err("root not free\n");
215 err = -EINVAL;
216 }
217
218 order = i915_buddy_block_order(root);
219
220 if (!i) {
221 if (order != mm->max_order) {
222 pr_err("max order root missing\n");
223 err = -EINVAL;
224 }
225 }
226
227 if (prev) {
228 u64 prev_block_size;
229 u64 prev_offset;
230 u64 offset;
231
232 prev_offset = i915_buddy_block_offset(prev);
233 prev_block_size = i915_buddy_block_size(mm, prev);
234 offset = i915_buddy_block_offset(root);
235
236 if (offset != (prev_offset + prev_block_size)) {
237 pr_err("root offset mismatch\n");
238 err = -EINVAL;
239 }
240 }
241
242 block = list_first_entry_or_null(&mm->free_list[order],
243 struct i915_buddy_block,
244 link);
245 if (block != root) {
246 pr_err("root mismatch at order=%u\n", order);
247 err = -EINVAL;
248 }
249
250 if (err)
251 break;
252
253 prev = root;
254 total += i915_buddy_block_size(mm, root);
255 }
256
257 if (!err) {
258 if (total != mm->size) {
259 pr_err("expected mm size=%llx, found=%llx\n", mm->size,
260 total);
261 err = -EINVAL;
262 }
263 return err;
264 }
265
266 if (prev) {
267 pr_err("prev root(%u), dump:\n", i - 1);
268 igt_dump_block(mm, prev);
269 }
270
271 if (root) {
272 pr_err("bad root(%u), dump:\n", i);
273 igt_dump_block(mm, root);
274 }
275
276 return err;
277 }
278
/*
 * Pick a pseudo-random (size, chunk_size) pair for a test mm, derived
 * from the selftest seed so runs are reproducible.
 */
static void igt_mm_config(u64 *size, u64 *chunk_size)
{
	I915_RND_STATE(prng);
	u32 pages, min_pages;

	/* Nothing fancy, just try to get an interesting bit pattern */

	prandom_seed_state(&prng, i915_selftest.random_seed);

	/* Let size be a random number of pages up to 8 GB (2M pages) */
	pages = 1 + i915_prandom_u32_max_state((BIT(33 - 12)) - 1, &prng);
	/* Let the chunk size be a random power of 2 less than size */
	min_pages = BIT(i915_prandom_u32_max_state(ilog2(pages), &prng));
	/* Round size down to the chunk size */
	pages &= -min_pages;

	/* Convert from pages to bytes */
	*chunk_size = (u64)min_pages << 12;
	*size = (u64)pages << 12;
}
299
igt_buddy_alloc_smoke(void * arg)300 static int igt_buddy_alloc_smoke(void *arg)
301 {
302 struct i915_buddy_mm mm;
303 IGT_TIMEOUT(end_time);
304 I915_RND_STATE(prng);
305 u64 chunk_size;
306 u64 mm_size;
307 int *order;
308 int err, i;
309
310 igt_mm_config(&mm_size, &chunk_size);
311
312 pr_info("buddy_init with size=%llx, chunk_size=%llx\n", mm_size, chunk_size);
313
314 err = i915_buddy_init(&mm, mm_size, chunk_size);
315 if (err) {
316 pr_err("buddy_init failed(%d)\n", err);
317 return err;
318 }
319
320 order = i915_random_order(mm.max_order + 1, &prng);
321 if (!order)
322 goto out_fini;
323
324 for (i = 0; i <= mm.max_order; ++i) {
325 struct i915_buddy_block *block;
326 int max_order = order[i];
327 bool timeout = false;
328 LIST_HEAD(blocks);
329 int order;
330 u64 total;
331
332 err = igt_check_mm(&mm);
333 if (err) {
334 pr_err("pre-mm check failed, abort\n");
335 break;
336 }
337
338 pr_info("filling from max_order=%u\n", max_order);
339
340 order = max_order;
341 total = 0;
342
343 do {
344 retry:
345 block = i915_buddy_alloc(&mm, order);
346 if (IS_ERR(block)) {
347 err = PTR_ERR(block);
348 if (err == -ENOMEM) {
349 pr_info("buddy_alloc hit -ENOMEM with order=%d\n",
350 order);
351 } else {
352 if (order--) {
353 err = 0;
354 goto retry;
355 }
356
357 pr_err("buddy_alloc with order=%d failed(%d)\n",
358 order, err);
359 }
360
361 break;
362 }
363
364 list_add_tail(&block->link, &blocks);
365
366 if (i915_buddy_block_order(block) != order) {
367 pr_err("buddy_alloc order mismatch\n");
368 err = -EINVAL;
369 break;
370 }
371
372 total += i915_buddy_block_size(&mm, block);
373
374 if (__igt_timeout(end_time, NULL)) {
375 timeout = true;
376 break;
377 }
378 } while (total < mm.size);
379
380 if (!err)
381 err = igt_check_blocks(&mm, &blocks, total, false);
382
383 i915_buddy_free_list(&mm, &blocks);
384
385 if (!err) {
386 err = igt_check_mm(&mm);
387 if (err)
388 pr_err("post-mm check failed\n");
389 }
390
391 if (err || timeout)
392 break;
393
394 cond_resched();
395 }
396
397 if (err == -ENOMEM)
398 err = 0;
399
400 kfree(order);
401 out_fini:
402 i915_buddy_fini(&mm);
403
404 return err;
405 }
406
igt_buddy_alloc_pessimistic(void * arg)407 static int igt_buddy_alloc_pessimistic(void *arg)
408 {
409 const unsigned int max_order = 16;
410 struct i915_buddy_block *block, *bn;
411 struct i915_buddy_mm mm;
412 unsigned int order;
413 LIST_HEAD(blocks);
414 int err;
415
416 /*
417 * Create a pot-sized mm, then allocate one of each possible
418 * order within. This should leave the mm with exactly one
419 * page left.
420 */
421
422 err = i915_buddy_init(&mm, PAGE_SIZE << max_order, PAGE_SIZE);
423 if (err) {
424 pr_err("buddy_init failed(%d)\n", err);
425 return err;
426 }
427 GEM_BUG_ON(mm.max_order != max_order);
428
429 for (order = 0; order < max_order; order++) {
430 block = i915_buddy_alloc(&mm, order);
431 if (IS_ERR(block)) {
432 pr_info("buddy_alloc hit -ENOMEM with order=%d\n",
433 order);
434 err = PTR_ERR(block);
435 goto err;
436 }
437
438 list_add_tail(&block->link, &blocks);
439 }
440
441 /* And now the last remaining block available */
442 block = i915_buddy_alloc(&mm, 0);
443 if (IS_ERR(block)) {
444 pr_info("buddy_alloc hit -ENOMEM on final alloc\n");
445 err = PTR_ERR(block);
446 goto err;
447 }
448 list_add_tail(&block->link, &blocks);
449
450 /* Should be completely full! */
451 for (order = max_order; order--; ) {
452 block = i915_buddy_alloc(&mm, order);
453 if (!IS_ERR(block)) {
454 pr_info("buddy_alloc unexpectedly succeeded at order %d, it should be full!",
455 order);
456 list_add_tail(&block->link, &blocks);
457 err = -EINVAL;
458 goto err;
459 }
460 }
461
462 block = list_last_entry(&blocks, typeof(*block), link);
463 list_del(&block->link);
464 i915_buddy_free(&mm, block);
465
466 /* As we free in increasing size, we make available larger blocks */
467 order = 1;
468 list_for_each_entry_safe(block, bn, &blocks, link) {
469 list_del(&block->link);
470 i915_buddy_free(&mm, block);
471
472 block = i915_buddy_alloc(&mm, order);
473 if (IS_ERR(block)) {
474 pr_info("buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
475 order);
476 err = PTR_ERR(block);
477 goto err;
478 }
479 i915_buddy_free(&mm, block);
480 order++;
481 }
482
483 /* To confirm, now the whole mm should be available */
484 block = i915_buddy_alloc(&mm, max_order);
485 if (IS_ERR(block)) {
486 pr_info("buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
487 max_order);
488 err = PTR_ERR(block);
489 goto err;
490 }
491 i915_buddy_free(&mm, block);
492
493 err:
494 i915_buddy_free_list(&mm, &blocks);
495 i915_buddy_fini(&mm);
496 return err;
497 }
498
igt_buddy_alloc_optimistic(void * arg)499 static int igt_buddy_alloc_optimistic(void *arg)
500 {
501 const int max_order = 16;
502 struct i915_buddy_block *block;
503 struct i915_buddy_mm mm;
504 LIST_HEAD(blocks);
505 int order;
506 int err;
507
508 /*
509 * Create a mm with one block of each order available, and
510 * try to allocate them all.
511 */
512
513 err = i915_buddy_init(&mm,
514 PAGE_SIZE * ((1 << (max_order + 1)) - 1),
515 PAGE_SIZE);
516 if (err) {
517 pr_err("buddy_init failed(%d)\n", err);
518 return err;
519 }
520 GEM_BUG_ON(mm.max_order != max_order);
521
522 for (order = 0; order <= max_order; order++) {
523 block = i915_buddy_alloc(&mm, order);
524 if (IS_ERR(block)) {
525 pr_info("buddy_alloc hit -ENOMEM with order=%d\n",
526 order);
527 err = PTR_ERR(block);
528 goto err;
529 }
530
531 list_add_tail(&block->link, &blocks);
532 }
533
534 /* Should be completely full! */
535 block = i915_buddy_alloc(&mm, 0);
536 if (!IS_ERR(block)) {
537 pr_info("buddy_alloc unexpectedly succeeded, it should be full!");
538 list_add_tail(&block->link, &blocks);
539 err = -EINVAL;
540 goto err;
541 }
542
543 err:
544 i915_buddy_free_list(&mm, &blocks);
545 i915_buddy_fini(&mm);
546 return err;
547 }
548
igt_buddy_alloc_pathological(void * arg)549 static int igt_buddy_alloc_pathological(void *arg)
550 {
551 const int max_order = 16;
552 struct i915_buddy_block *block;
553 struct i915_buddy_mm mm;
554 LIST_HEAD(blocks);
555 LIST_HEAD(holes);
556 int order, top;
557 int err;
558
559 /*
560 * Create a pot-sized mm, then allocate one of each possible
561 * order within. This should leave the mm with exactly one
562 * page left. Free the largest block, then whittle down again.
563 * Eventually we will have a fully 50% fragmented mm.
564 */
565
566 err = i915_buddy_init(&mm, PAGE_SIZE << max_order, PAGE_SIZE);
567 if (err) {
568 pr_err("buddy_init failed(%d)\n", err);
569 return err;
570 }
571 GEM_BUG_ON(mm.max_order != max_order);
572
573 for (top = max_order; top; top--) {
574 /* Make room by freeing the largest allocated block */
575 block = list_first_entry_or_null(&blocks, typeof(*block), link);
576 if (block) {
577 list_del(&block->link);
578 i915_buddy_free(&mm, block);
579 }
580
581 for (order = top; order--; ) {
582 block = i915_buddy_alloc(&mm, order);
583 if (IS_ERR(block)) {
584 pr_info("buddy_alloc hit -ENOMEM with order=%d, top=%d\n",
585 order, top);
586 err = PTR_ERR(block);
587 goto err;
588 }
589 list_add_tail(&block->link, &blocks);
590 }
591
592 /* There should be one final page for this sub-allocation */
593 block = i915_buddy_alloc(&mm, 0);
594 if (IS_ERR(block)) {
595 pr_info("buddy_alloc hit -ENOMEM for hole\n");
596 err = PTR_ERR(block);
597 goto err;
598 }
599 list_add_tail(&block->link, &holes);
600
601 block = i915_buddy_alloc(&mm, top);
602 if (!IS_ERR(block)) {
603 pr_info("buddy_alloc unexpectedly succeeded at top-order %d/%d, it should be full!",
604 top, max_order);
605 list_add_tail(&block->link, &blocks);
606 err = -EINVAL;
607 goto err;
608 }
609 }
610
611 i915_buddy_free_list(&mm, &holes);
612
613 /* Nothing larger than blocks of chunk_size now available */
614 for (order = 1; order <= max_order; order++) {
615 block = i915_buddy_alloc(&mm, order);
616 if (!IS_ERR(block)) {
617 pr_info("buddy_alloc unexpectedly succeeded at order %d, it should be full!",
618 order);
619 list_add_tail(&block->link, &blocks);
620 err = -EINVAL;
621 goto err;
622 }
623 }
624
625 err:
626 list_splice_tail(&holes, &blocks);
627 i915_buddy_free_list(&mm, &blocks);
628 i915_buddy_fini(&mm);
629 return err;
630 }
631
igt_buddy_alloc_range(void * arg)632 static int igt_buddy_alloc_range(void *arg)
633 {
634 struct i915_buddy_mm mm;
635 unsigned long page_num;
636 LIST_HEAD(blocks);
637 u64 chunk_size;
638 u64 offset;
639 u64 size;
640 u64 rem;
641 int err;
642
643 igt_mm_config(&size, &chunk_size);
644
645 pr_info("buddy_init with size=%llx, chunk_size=%llx\n", size, chunk_size);
646
647 err = i915_buddy_init(&mm, size, chunk_size);
648 if (err) {
649 pr_err("buddy_init failed(%d)\n", err);
650 return err;
651 }
652
653 err = igt_check_mm(&mm);
654 if (err) {
655 pr_err("pre-mm check failed, abort, abort, abort!\n");
656 goto err_fini;
657 }
658
659 rem = mm.size;
660 offset = 0;
661
662 for_each_prime_number_from(page_num, 1, ULONG_MAX - 1) {
663 struct i915_buddy_block *block;
664 LIST_HEAD(tmp);
665
666 size = min(page_num * mm.chunk_size, rem);
667
668 err = i915_buddy_alloc_range(&mm, &tmp, offset, size);
669 if (err) {
670 if (err == -ENOMEM) {
671 pr_info("alloc_range hit -ENOMEM with size=%llx\n",
672 size);
673 } else {
674 pr_err("alloc_range with offset=%llx, size=%llx failed(%d)\n",
675 offset, size, err);
676 }
677
678 break;
679 }
680
681 block = list_first_entry_or_null(&tmp,
682 struct i915_buddy_block,
683 link);
684 if (!block) {
685 pr_err("alloc_range has no blocks\n");
686 err = -EINVAL;
687 break;
688 }
689
690 if (i915_buddy_block_offset(block) != offset) {
691 pr_err("alloc_range start offset mismatch, found=%llx, expected=%llx\n",
692 i915_buddy_block_offset(block), offset);
693 err = -EINVAL;
694 }
695
696 if (!err)
697 err = igt_check_blocks(&mm, &tmp, size, true);
698
699 list_splice_tail(&tmp, &blocks);
700
701 if (err)
702 break;
703
704 offset += size;
705
706 rem -= size;
707 if (!rem)
708 break;
709
710 cond_resched();
711 }
712
713 if (err == -ENOMEM)
714 err = 0;
715
716 i915_buddy_free_list(&mm, &blocks);
717
718 if (!err) {
719 err = igt_check_mm(&mm);
720 if (err)
721 pr_err("post-mm check failed\n");
722 }
723
724 err_fini:
725 i915_buddy_fini(&mm);
726
727 return err;
728 }
729
i915_buddy_mock_selftests(void)730 int i915_buddy_mock_selftests(void)
731 {
732 static const struct i915_subtest tests[] = {
733 SUBTEST(igt_buddy_alloc_pessimistic),
734 SUBTEST(igt_buddy_alloc_optimistic),
735 SUBTEST(igt_buddy_alloc_pathological),
736 SUBTEST(igt_buddy_alloc_smoke),
737 SUBTEST(igt_buddy_alloc_range),
738 };
739
740 return i915_subtests(tests, NULL);
741 }
742