1 // SPDX-License-Identifier: GPL-2.0-or-later
2 #include "alloc_nid_api.h"
3
4 static int alloc_nid_test_flags = TEST_F_NONE;
5
6 /*
7  * Fraction of MEM_SIZE assigned to each node, in basis point units
8  * (one hundredth of 1%, i.e. 1/10000)
9 */
10 static const unsigned int node_fractions[] = {
11 2500, /* 1/4 */
12 625, /* 1/16 */
13 1250, /* 1/8 */
14 1250, /* 1/8 */
15 625, /* 1/16 */
16 625, /* 1/16 */
17 2500, /* 1/4 */
18 625, /* 1/16 */
19 };
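/*
 * Worked example (editor's sketch, assuming setup_numa_memblock() in the
 * common test code splits MEM_SIZE according to this table): node 0 gets
 * 2500/10000 = 1/4 of MEM_SIZE, node 1 gets 625/10000 = 1/16, and so on.
 * The fractions above sum to 10000, i.e. the whole of MEM_SIZE.
 */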
20
21 static inline const char * const get_memblock_alloc_try_nid_name(int flags)
22 {
23 if (flags & TEST_F_RAW)
24 return "memblock_alloc_try_nid_raw";
25 return "memblock_alloc_try_nid";
26 }
27
28 static inline void *run_memblock_alloc_try_nid(phys_addr_t size,
29 phys_addr_t align,
30 phys_addr_t min_addr,
31 phys_addr_t max_addr, int nid)
32 {
33 if (alloc_nid_test_flags & TEST_F_RAW)
34 return memblock_alloc_try_nid_raw(size, align, min_addr,
35 max_addr, nid);
36 return memblock_alloc_try_nid(size, align, min_addr, max_addr, nid);
37 }
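/*
 * Note on the two entry points exercised above: memblock_alloc_try_nid() is
 * expected to return zeroed memory, while the _raw variant leaves the
 * contents uninitialized. assert_mem_content() keys off alloc_nid_test_flags
 * so the same test bodies below can verify whichever expectation applies.
 */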
38
39 /*
40 * A simple test that tries to allocate a memory region within min_addr and
41 * max_addr range:
42 *
43 * + +
44 * | + +-----------+ |
45 * | | | rgn | |
46 * +----+-------+-----------+------+
47 * ^ ^
48 * | |
49 * min_addr max_addr
50 *
51 * Expect to allocate a region that ends at max_addr.
52 */
53 static int alloc_try_nid_top_down_simple_check(void)
54 {
55 struct memblock_region *rgn = &memblock.reserved.regions[0];
56 void *allocated_ptr = NULL;
57 phys_addr_t size = SZ_128;
58 phys_addr_t min_addr;
59 phys_addr_t max_addr;
60 phys_addr_t rgn_end;
61
62 PREFIX_PUSH();
63 setup_memblock();
64
65 min_addr = memblock_start_of_DRAM() + SMP_CACHE_BYTES * 2;
66 max_addr = min_addr + SZ_512;
67
68 allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
69 min_addr, max_addr,
70 NUMA_NO_NODE);
71 rgn_end = rgn->base + rgn->size;
72
73 ASSERT_NE(allocated_ptr, NULL);
74 assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
75
76 ASSERT_EQ(rgn->size, size);
77 ASSERT_EQ(rgn->base, max_addr - size);
78 ASSERT_EQ(rgn_end, max_addr);
79
80 ASSERT_EQ(memblock.reserved.cnt, 1);
81 ASSERT_EQ(memblock.reserved.total_size, size);
82
83 test_pass_pop();
84
85 return 0;
86 }
87
88 /*
89 * A simple test that tries to allocate a memory region within min_addr and
90 * max_addr range, where the end address is misaligned:
91 *
92 * + + +
93 * | + +---------+ + |
94 * | | | rgn | | |
95 * +------+-------+---------+--+----+
96 * ^ ^ ^
97 * | | |
98 * min_addr | max_addr
99 * |
100 * Aligned address
101 * boundary
102 *
103 * Expect to allocate an aligned region that ends before max_addr.
104 */
105 static int alloc_try_nid_top_down_end_misaligned_check(void)
106 {
107 struct memblock_region *rgn = &memblock.reserved.regions[0];
108 void *allocated_ptr = NULL;
109 phys_addr_t size = SZ_128;
110 phys_addr_t misalign = SZ_2;
111 phys_addr_t min_addr;
112 phys_addr_t max_addr;
113 phys_addr_t rgn_end;
114
115 PREFIX_PUSH();
116 setup_memblock();
117
118 min_addr = memblock_start_of_DRAM() + SMP_CACHE_BYTES * 2;
119 max_addr = min_addr + SZ_512 + misalign;
120
121 allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
122 min_addr, max_addr,
123 NUMA_NO_NODE);
124 rgn_end = rgn->base + rgn->size;
125
126 ASSERT_NE(allocated_ptr, NULL);
127 assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
128
129 ASSERT_EQ(rgn->size, size);
130 ASSERT_EQ(rgn->base, max_addr - size - misalign);
131 ASSERT_LT(rgn_end, max_addr);
132
133 ASSERT_EQ(memblock.reserved.cnt, 1);
134 ASSERT_EQ(memblock.reserved.total_size, size);
135
136 test_pass_pop();
137
138 return 0;
139 }
140
141 /*
142 * A simple test that tries to allocate a memory region, which spans over the
143 * min_addr and max_addr range:
144 *
145 * + +
146 * | +---------------+ |
147 * | | rgn | |
148 * +------+---------------+-------+
149 * ^ ^
150 * | |
151 * min_addr max_addr
152 *
153 * Expect to allocate a region that starts at min_addr and ends at
154 * max_addr, given that min_addr is aligned.
155 */
156 static int alloc_try_nid_exact_address_generic_check(void)
157 {
158 struct memblock_region *rgn = &memblock.reserved.regions[0];
159 void *allocated_ptr = NULL;
160 phys_addr_t size = SZ_1K;
161 phys_addr_t min_addr;
162 phys_addr_t max_addr;
163 phys_addr_t rgn_end;
164
165 PREFIX_PUSH();
166 setup_memblock();
167
168 min_addr = memblock_start_of_DRAM() + SMP_CACHE_BYTES;
169 max_addr = min_addr + size;
170
171 allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
172 min_addr, max_addr,
173 NUMA_NO_NODE);
174 rgn_end = rgn->base + rgn->size;
175
176 ASSERT_NE(allocated_ptr, NULL);
177 assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
178
179 ASSERT_EQ(rgn->size, size);
180 ASSERT_EQ(rgn->base, min_addr);
181 ASSERT_EQ(rgn_end, max_addr);
182
183 ASSERT_EQ(memblock.reserved.cnt, 1);
184 ASSERT_EQ(memblock.reserved.total_size, size);
185
186 test_pass_pop();
187
188 return 0;
189 }
190
191 /*
192 * A test that tries to allocate a memory region, which can't fit into
193 * min_addr and max_addr range:
194 *
195 * + + +
196 * | +----------+-----+ |
197 * | | rgn + | |
198 * +--------+----------+-----+----+
199 * ^ ^ ^
200 * | | |
201 * Aligned | max_addr
202 * address |
203 * boundary min_addr
204 *
205 * Expect to drop the lower limit and allocate a memory region which
206 * ends at max_addr (if the address is aligned).
207 */
208 static int alloc_try_nid_top_down_narrow_range_check(void)
209 {
210 struct memblock_region *rgn = &memblock.reserved.regions[0];
211 void *allocated_ptr = NULL;
212 phys_addr_t size = SZ_256;
213 phys_addr_t min_addr;
214 phys_addr_t max_addr;
215
216 PREFIX_PUSH();
217 setup_memblock();
218
219 min_addr = memblock_start_of_DRAM() + SZ_512;
220 max_addr = min_addr + SMP_CACHE_BYTES;
221
222 allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
223 min_addr, max_addr,
224 NUMA_NO_NODE);
225
226 ASSERT_NE(allocated_ptr, NULL);
227 assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
228
229 ASSERT_EQ(rgn->size, size);
230 ASSERT_EQ(rgn->base, max_addr - size);
231
232 ASSERT_EQ(memblock.reserved.cnt, 1);
233 ASSERT_EQ(memblock.reserved.total_size, size);
234
235 test_pass_pop();
236
237 return 0;
238 }
239
240 /*
241 * A test that tries to allocate a memory region, which can't fit into
242 * min_addr and max_addr range, with the latter being too close to the beginning
243 * of the available memory:
244 *
245 * +-------------+
246 * | new |
247 * +-------------+
248 * + +
249 * | + |
250 * | | |
251 * +-------+--------------+
252 * ^ ^
253 * | |
254 * | max_addr
255 * |
256 * min_addr
257 *
258 * Expect no allocation to happen.
259 */
260 static int alloc_try_nid_low_max_generic_check(void)
261 {
262 void *allocated_ptr = NULL;
263 phys_addr_t size = SZ_1K;
264 phys_addr_t min_addr;
265 phys_addr_t max_addr;
266
267 PREFIX_PUSH();
268 setup_memblock();
269
270 min_addr = memblock_start_of_DRAM();
271 max_addr = min_addr + SMP_CACHE_BYTES;
272
273 allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
274 min_addr, max_addr,
275 NUMA_NO_NODE);
276
277 ASSERT_EQ(allocated_ptr, NULL);
278
279 test_pass_pop();
280
281 return 0;
282 }
283
284 /*
285 * A test that tries to allocate a memory region within min_addr and max_addr range,
286 * with min_addr being so close that it's next to an allocated region:
287 *
288 * + +
289 * | +--------+---------------|
290 * | | r1 | rgn |
291 * +-------+--------+---------------+
292 * ^ ^
293 * | |
294 * min_addr max_addr
295 *
296 * Expect a merge of both regions. Only the region size gets updated.
297 */
298 static int alloc_try_nid_min_reserved_generic_check(void)
299 {
300 struct memblock_region *rgn = &memblock.reserved.regions[0];
301 void *allocated_ptr = NULL;
302 phys_addr_t r1_size = SZ_128;
303 phys_addr_t r2_size = SZ_64;
304 phys_addr_t total_size = r1_size + r2_size;
305 phys_addr_t min_addr;
306 phys_addr_t max_addr;
307 phys_addr_t reserved_base;
308
309 PREFIX_PUSH();
310 setup_memblock();
311
312 max_addr = memblock_end_of_DRAM();
313 min_addr = max_addr - r2_size;
314 reserved_base = min_addr - r1_size;
315
316 memblock_reserve(reserved_base, r1_size);
317
318 allocated_ptr = run_memblock_alloc_try_nid(r2_size, SMP_CACHE_BYTES,
319 min_addr, max_addr,
320 NUMA_NO_NODE);
321
322 ASSERT_NE(allocated_ptr, NULL);
323 assert_mem_content(allocated_ptr, r2_size, alloc_nid_test_flags);
324
325 ASSERT_EQ(rgn->size, total_size);
326 ASSERT_EQ(rgn->base, reserved_base);
327
328 ASSERT_EQ(memblock.reserved.cnt, 1);
329 ASSERT_EQ(memblock.reserved.total_size, total_size);
330
331 test_pass_pop();
332
333 return 0;
334 }
335
336 /*
337 * A test that tries to allocate a memory region within min_addr and max_addr,
338 * with max_addr being so close that it's next to an allocated region:
339 *
340 * + +
341 * | +-------------+--------|
342 * | | rgn | r1 |
343 * +----------+-------------+--------+
344 * ^ ^
345 * | |
346 * min_addr max_addr
347 *
348 * Expect a merge of both regions. Only the region size gets updated.
349 */
350 static int alloc_try_nid_max_reserved_generic_check(void)
351 {
352 struct memblock_region *rgn = &memblock.reserved.regions[0];
353 void *allocated_ptr = NULL;
354 phys_addr_t r1_size = SZ_64;
355 phys_addr_t r2_size = SZ_128;
356 phys_addr_t total_size = r1_size + r2_size;
357 phys_addr_t min_addr;
358 phys_addr_t max_addr;
359
360 PREFIX_PUSH();
361 setup_memblock();
362
363 max_addr = memblock_end_of_DRAM() - r1_size;
364 min_addr = max_addr - r2_size;
365
366 memblock_reserve(max_addr, r1_size);
367
368 allocated_ptr = run_memblock_alloc_try_nid(r2_size, SMP_CACHE_BYTES,
369 min_addr, max_addr,
370 NUMA_NO_NODE);
371
372 ASSERT_NE(allocated_ptr, NULL);
373 assert_mem_content(allocated_ptr, r2_size, alloc_nid_test_flags);
374
375 ASSERT_EQ(rgn->size, total_size);
376 ASSERT_EQ(rgn->base, min_addr);
377
378 ASSERT_EQ(memblock.reserved.cnt, 1);
379 ASSERT_EQ(memblock.reserved.total_size, total_size);
380
381 test_pass_pop();
382
383 return 0;
384 }
385
386 /*
387 * A test that tries to allocate memory within min_addr and max_addr range, when
388 * there are two reserved regions at the borders, with a gap big enough to fit
389 * a new region:
390 *
391 * + +
392 * | +--------+ +-------+------+ |
393 * | | r2 | | rgn | r1 | |
394 * +----+--------+---+-------+------+--+
395 * ^ ^
396 * | |
397 * min_addr max_addr
398 *
399 * Expect to merge the new region with r1. The second region does not get
400 * updated. The total size field gets updated.
401 */
402
403 static int alloc_try_nid_top_down_reserved_with_space_check(void)
404 {
405 struct memblock_region *rgn1 = &memblock.reserved.regions[1];
406 struct memblock_region *rgn2 = &memblock.reserved.regions[0];
407 void *allocated_ptr = NULL;
408 struct region r1, r2;
409 phys_addr_t r3_size = SZ_64;
410 phys_addr_t gap_size = SMP_CACHE_BYTES;
411 phys_addr_t total_size;
412 phys_addr_t max_addr;
413 phys_addr_t min_addr;
414
415 PREFIX_PUSH();
416 setup_memblock();
417
418 r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
419 r1.size = SMP_CACHE_BYTES;
420
421 r2.size = SZ_128;
422 r2.base = r1.base - (r3_size + gap_size + r2.size);
423
424 total_size = r1.size + r2.size + r3_size;
425 min_addr = r2.base + r2.size;
426 max_addr = r1.base;
427
428 memblock_reserve(r1.base, r1.size);
429 memblock_reserve(r2.base, r2.size);
430
431 allocated_ptr = run_memblock_alloc_try_nid(r3_size, SMP_CACHE_BYTES,
432 min_addr, max_addr,
433 NUMA_NO_NODE);
434
435 ASSERT_NE(allocated_ptr, NULL);
436 assert_mem_content(allocated_ptr, r3_size, alloc_nid_test_flags);
437
438 ASSERT_EQ(rgn1->size, r1.size + r3_size);
439 ASSERT_EQ(rgn1->base, max_addr - r3_size);
440
441 ASSERT_EQ(rgn2->size, r2.size);
442 ASSERT_EQ(rgn2->base, r2.base);
443
444 ASSERT_EQ(memblock.reserved.cnt, 2);
445 ASSERT_EQ(memblock.reserved.total_size, total_size);
446
447 test_pass_pop();
448
449 return 0;
450 }
451
452 /*
453 * A test that tries to allocate memory within min_addr and max_addr range, when
454 * there are two reserved regions at the borders, with a gap of a size equal to
455 * the size of the new region:
456 *
457 * + +
458 * | +--------+--------+--------+ |
459 * | | r2 | r3 | r1 | |
460 * +-----+--------+--------+--------+-----+
461 * ^ ^
462 * | |
463 * min_addr max_addr
464 *
465 * Expect to merge all of the regions into one. The region counter and total
466 * size fields get updated.
467 */
468 static int alloc_try_nid_reserved_full_merge_generic_check(void)
469 {
470 struct memblock_region *rgn = &memblock.reserved.regions[0];
471 void *allocated_ptr = NULL;
472 struct region r1, r2;
473 phys_addr_t r3_size = SZ_64;
474 phys_addr_t total_size;
475 phys_addr_t max_addr;
476 phys_addr_t min_addr;
477
478 PREFIX_PUSH();
479 setup_memblock();
480
481 r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
482 r1.size = SMP_CACHE_BYTES;
483
484 r2.size = SZ_128;
485 r2.base = r1.base - (r3_size + r2.size);
486
487 total_size = r1.size + r2.size + r3_size;
488 min_addr = r2.base + r2.size;
489 max_addr = r1.base;
490
491 memblock_reserve(r1.base, r1.size);
492 memblock_reserve(r2.base, r2.size);
493
494 allocated_ptr = run_memblock_alloc_try_nid(r3_size, SMP_CACHE_BYTES,
495 min_addr, max_addr,
496 NUMA_NO_NODE);
497
498 ASSERT_NE(allocated_ptr, NULL);
499 assert_mem_content(allocated_ptr, r3_size, alloc_nid_test_flags);
500
501 ASSERT_EQ(rgn->size, total_size);
502 ASSERT_EQ(rgn->base, r2.base);
503
504 ASSERT_EQ(memblock.reserved.cnt, 1);
505 ASSERT_EQ(memblock.reserved.total_size, total_size);
506
507 test_pass_pop();
508
509 return 0;
510 }
511
512 /*
513 * A test that tries to allocate memory within min_addr and max_addr range, when
514 * there are two reserved regions at the borders, with a gap that can't fit
515 * a new region:
516 *
517 * + +
518 * | +----------+------+ +------+ |
519 * | | r3 | r2 | | r1 | |
520 * +--+----------+------+----+------+---+
521 * ^ ^
522 * | |
523 * | max_addr
524 * |
525 * min_addr
526 *
527 * Expect to merge the new region with r2. The second region does not get
528 * updated. The total size counter gets updated.
529 */
530 static int alloc_try_nid_top_down_reserved_no_space_check(void)
531 {
532 struct memblock_region *rgn1 = &memblock.reserved.regions[1];
533 struct memblock_region *rgn2 = &memblock.reserved.regions[0];
534 void *allocated_ptr = NULL;
535 struct region r1, r2;
536 phys_addr_t r3_size = SZ_256;
537 phys_addr_t gap_size = SMP_CACHE_BYTES;
538 phys_addr_t total_size;
539 phys_addr_t max_addr;
540 phys_addr_t min_addr;
541
542 PREFIX_PUSH();
543 setup_memblock();
544
545 r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
546 r1.size = SMP_CACHE_BYTES;
547
548 r2.size = SZ_128;
549 r2.base = r1.base - (r2.size + gap_size);
550
551 total_size = r1.size + r2.size + r3_size;
552 min_addr = r2.base + r2.size;
553 max_addr = r1.base;
554
555 memblock_reserve(r1.base, r1.size);
556 memblock_reserve(r2.base, r2.size);
557
558 allocated_ptr = run_memblock_alloc_try_nid(r3_size, SMP_CACHE_BYTES,
559 min_addr, max_addr,
560 NUMA_NO_NODE);
561
562 ASSERT_NE(allocated_ptr, NULL);
563 assert_mem_content(allocated_ptr, r3_size, alloc_nid_test_flags);
564
565 ASSERT_EQ(rgn1->size, r1.size);
566 ASSERT_EQ(rgn1->base, r1.base);
567
568 ASSERT_EQ(rgn2->size, r2.size + r3_size);
569 ASSERT_EQ(rgn2->base, r2.base - r3_size);
570
571 ASSERT_EQ(memblock.reserved.cnt, 2);
572 ASSERT_EQ(memblock.reserved.total_size, total_size);
573
574 test_pass_pop();
575
576 return 0;
577 }
578
579 /*
580 * A test that tries to allocate memory within min_addr and max_addr range, but
581 * it's too narrow and everything else is reserved:
582 *
583 * +-----------+
584 * | new |
585 * +-----------+
586 * + +
587 * |--------------+ +----------|
588 * | r2 | | r1 |
589 * +--------------+------+----------+
590 * ^ ^
591 * | |
592 * | max_addr
593 * |
594 * min_addr
595 *
596 * Expect no allocation to happen.
597 */
598
599 static int alloc_try_nid_reserved_all_generic_check(void)
600 {
601 void *allocated_ptr = NULL;
602 struct region r1, r2;
603 phys_addr_t r3_size = SZ_256;
604 phys_addr_t gap_size = SMP_CACHE_BYTES;
605 phys_addr_t max_addr;
606 phys_addr_t min_addr;
607
608 PREFIX_PUSH();
609 setup_memblock();
610
611 r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES;
612 r1.size = SMP_CACHE_BYTES;
613
614 r2.size = MEM_SIZE - (r1.size + gap_size);
615 r2.base = memblock_start_of_DRAM();
616
617 min_addr = r2.base + r2.size;
618 max_addr = r1.base;
619
620 memblock_reserve(r1.base, r1.size);
621 memblock_reserve(r2.base, r2.size);
622
623 allocated_ptr = run_memblock_alloc_try_nid(r3_size, SMP_CACHE_BYTES,
624 min_addr, max_addr,
625 NUMA_NO_NODE);
626
627 ASSERT_EQ(allocated_ptr, NULL);
628
629 test_pass_pop();
630
631 return 0;
632 }
633
634 /*
635 * A test that tries to allocate a memory region, where max_addr is
636 * bigger than the end address of the available memory. Expect to allocate
637 * a region that ends at the end of the available memory.
638 */
639 static int alloc_try_nid_top_down_cap_max_check(void)
640 {
641 struct memblock_region *rgn = &memblock.reserved.regions[0];
642 void *allocated_ptr = NULL;
643 phys_addr_t size = SZ_256;
644 phys_addr_t min_addr;
645 phys_addr_t max_addr;
646
647 PREFIX_PUSH();
648 setup_memblock();
649
650 min_addr = memblock_end_of_DRAM() - SZ_1K;
651 max_addr = memblock_end_of_DRAM() + SZ_256;
652
653 allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
654 min_addr, max_addr,
655 NUMA_NO_NODE);
656
657 ASSERT_NE(allocated_ptr, NULL);
658 assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
659
660 ASSERT_EQ(rgn->size, size);
661 ASSERT_EQ(rgn->base, memblock_end_of_DRAM() - size);
662
663 ASSERT_EQ(memblock.reserved.cnt, 1);
664 ASSERT_EQ(memblock.reserved.total_size, size);
665
666 test_pass_pop();
667
668 return 0;
669 }
670
671 /*
672 * A test that tries to allocate a memory region, where min_addr is
673 * smaller than the start address of the available memory. Expect to allocate
674 * a region that ends at the end of the available memory.
675 */
676 static int alloc_try_nid_top_down_cap_min_check(void)
677 {
678 struct memblock_region *rgn = &memblock.reserved.regions[0];
679 void *allocated_ptr = NULL;
680 phys_addr_t size = SZ_1K;
681 phys_addr_t min_addr;
682 phys_addr_t max_addr;
683
684 PREFIX_PUSH();
685 setup_memblock();
686
687 min_addr = memblock_start_of_DRAM() - SZ_256;
688 max_addr = memblock_end_of_DRAM();
689
690 allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
691 min_addr, max_addr,
692 NUMA_NO_NODE);
693
694 ASSERT_NE(allocated_ptr, NULL);
695 assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
696
697 ASSERT_EQ(rgn->size, size);
698 ASSERT_EQ(rgn->base, memblock_end_of_DRAM() - size);
699
700 ASSERT_EQ(memblock.reserved.cnt, 1);
701 ASSERT_EQ(memblock.reserved.total_size, size);
702
703 test_pass_pop();
704
705 return 0;
706 }
707
708 /*
709 * A simple test that tries to allocate a memory region within min_addr and
710 * max_addr range:
711 *
712 * + +
713 * | +-----------+ | |
714 * | | rgn | | |
715 * +----+-----------+-----------+------+
716 * ^ ^
717 * | |
718 * min_addr max_addr
719 *
720 * Expect to allocate a region that ends before max_addr.
721 */
722 static int alloc_try_nid_bottom_up_simple_check(void)
723 {
724 struct memblock_region *rgn = &memblock.reserved.regions[0];
725 void *allocated_ptr = NULL;
726 phys_addr_t size = SZ_128;
727 phys_addr_t min_addr;
728 phys_addr_t max_addr;
729 phys_addr_t rgn_end;
730
731 PREFIX_PUSH();
732 setup_memblock();
733
734 min_addr = memblock_start_of_DRAM() + SMP_CACHE_BYTES * 2;
735 max_addr = min_addr + SZ_512;
736
737 allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
738 min_addr, max_addr,
739 NUMA_NO_NODE);
740 rgn_end = rgn->base + rgn->size;
741
742 ASSERT_NE(allocated_ptr, NULL);
743 assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
744
745 ASSERT_EQ(rgn->size, size);
746 ASSERT_EQ(rgn->base, min_addr);
747 ASSERT_LT(rgn_end, max_addr);
748
749 ASSERT_EQ(memblock.reserved.cnt, 1);
750 ASSERT_EQ(memblock.reserved.total_size, size);
751
752 test_pass_pop();
753
754 return 0;
755 }
756
757 /*
758 * A simple test that tries to allocate a memory region within min_addr and
759 * max_addr range, where the start address is misaligned:
760 *
761 * + +
762 * | + +-----------+ + |
763 * | | | rgn | | |
764 * +-----+---+-----------+-----+-----+
765 * ^ ^----. ^
766 * | | |
767 * min_addr | max_addr
768 * |
769 * Aligned address
770 * boundary
771 *
772 * Expect to allocate an aligned region that ends before max_addr.
773 */
774 static int alloc_try_nid_bottom_up_start_misaligned_check(void)
775 {
776 struct memblock_region *rgn = &memblock.reserved.regions[0];
777 void *allocated_ptr = NULL;
778 phys_addr_t size = SZ_128;
779 phys_addr_t misalign = SZ_2;
780 phys_addr_t min_addr;
781 phys_addr_t max_addr;
782 phys_addr_t rgn_end;
783
784 PREFIX_PUSH();
785 setup_memblock();
786
787 min_addr = memblock_start_of_DRAM() + misalign;
788 max_addr = min_addr + SZ_512;
789
790 allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
791 min_addr, max_addr,
792 NUMA_NO_NODE);
793 rgn_end = rgn->base + rgn->size;
794
795 ASSERT_NE(allocated_ptr, NULL);
796 assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
797
798 ASSERT_EQ(rgn->size, size);
799 ASSERT_EQ(rgn->base, min_addr + (SMP_CACHE_BYTES - misalign));
800 ASSERT_LT(rgn_end, max_addr);
801
802 ASSERT_EQ(memblock.reserved.cnt, 1);
803 ASSERT_EQ(memblock.reserved.total_size, size);
804
805 test_pass_pop();
806
807 return 0;
808 }
809
810 /*
811 * A test that tries to allocate a memory region, which can't fit into min_addr
812 * and max_addr range:
813 *
814 * + +
815 * |---------+ + + |
816 * | rgn | | | |
817 * +---------+---------+----+------+
818 * ^ ^
819 * | |
820 * | max_addr
821 * |
822 * min_addr
823 *
824 * Expect to drop the lower limit and allocate a memory region which
825 * starts at the beginning of the available memory.
826 */
827 static int alloc_try_nid_bottom_up_narrow_range_check(void)
828 {
829 struct memblock_region *rgn = &memblock.reserved.regions[0];
830 void *allocated_ptr = NULL;
831 phys_addr_t size = SZ_256;
832 phys_addr_t min_addr;
833 phys_addr_t max_addr;
834
835 PREFIX_PUSH();
836 setup_memblock();
837
838 min_addr = memblock_start_of_DRAM() + SZ_512;
839 max_addr = min_addr + SMP_CACHE_BYTES;
840
841 allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
842 min_addr, max_addr,
843 NUMA_NO_NODE);
844
845 ASSERT_NE(allocated_ptr, NULL);
846 assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
847
848 ASSERT_EQ(rgn->size, size);
849 ASSERT_EQ(rgn->base, memblock_start_of_DRAM());
850
851 ASSERT_EQ(memblock.reserved.cnt, 1);
852 ASSERT_EQ(memblock.reserved.total_size, size);
853
854 test_pass_pop();
855
856 return 0;
857 }
858
859 /*
860 * A test that tries to allocate memory within min_addr and max_addr range, when
861 * there are two reserved regions at the borders, with a gap big enough to fit
862 * a new region:
863 *
864 * + +
865 * | +--------+-------+ +------+ |
866 * | | r2 | rgn | | r1 | |
867 * +----+--------+-------+---+------+--+
868 * ^ ^
869 * | |
870 * min_addr max_addr
871 *
872 * Expect to merge the new region with r2. The second region does not get
873 * updated. The total size field gets updated.
874 */
875
876 static int alloc_try_nid_bottom_up_reserved_with_space_check(void)
877 {
878 struct memblock_region *rgn1 = &memblock.reserved.regions[1];
879 struct memblock_region *rgn2 = &memblock.reserved.regions[0];
880 void *allocated_ptr = NULL;
881 struct region r1, r2;
882 phys_addr_t r3_size = SZ_64;
883 phys_addr_t gap_size = SMP_CACHE_BYTES;
884 phys_addr_t total_size;
885 phys_addr_t max_addr;
886 phys_addr_t min_addr;
887
888 PREFIX_PUSH();
889 setup_memblock();
890
891 r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
892 r1.size = SMP_CACHE_BYTES;
893
894 r2.size = SZ_128;
895 r2.base = r1.base - (r3_size + gap_size + r2.size);
896
897 total_size = r1.size + r2.size + r3_size;
898 min_addr = r2.base + r2.size;
899 max_addr = r1.base;
900
901 memblock_reserve(r1.base, r1.size);
902 memblock_reserve(r2.base, r2.size);
903
904 allocated_ptr = run_memblock_alloc_try_nid(r3_size, SMP_CACHE_BYTES,
905 min_addr, max_addr,
906 NUMA_NO_NODE);
907
908 ASSERT_NE(allocated_ptr, NULL);
909 assert_mem_content(allocated_ptr, r3_size, alloc_nid_test_flags);
910
911 ASSERT_EQ(rgn1->size, r1.size);
912 ASSERT_EQ(rgn1->base, max_addr);
913
914 ASSERT_EQ(rgn2->size, r2.size + r3_size);
915 ASSERT_EQ(rgn2->base, r2.base);
916
917 ASSERT_EQ(memblock.reserved.cnt, 2);
918 ASSERT_EQ(memblock.reserved.total_size, total_size);
919
920 test_pass_pop();
921
922 return 0;
923 }
924
925 /*
926 * A test that tries to allocate memory within min_addr and max_addr range, when
927 * there are two reserved regions at the borders, with a gap of a size equal to
928 * the size of the new region:
929 *
930 * + +
931 * |----------+ +------+ +----+ |
932 * | r3 | | r2 | | r1 | |
933 * +----------+----+------+---+----+--+
934 * ^ ^
935 * | |
936 * | max_addr
937 * |
938 * min_addr
939 *
940 * Expect to drop the lower limit and allocate memory at the beginning of the
941 * available memory. The region counter and total size fields get updated.
942 * Other regions are not modified.
943 */
944
945 static int alloc_try_nid_bottom_up_reserved_no_space_check(void)
946 {
947 struct memblock_region *rgn1 = &memblock.reserved.regions[2];
948 struct memblock_region *rgn2 = &memblock.reserved.regions[1];
949 struct memblock_region *rgn3 = &memblock.reserved.regions[0];
950 void *allocated_ptr = NULL;
951 struct region r1, r2;
952 phys_addr_t r3_size = SZ_256;
953 phys_addr_t gap_size = SMP_CACHE_BYTES;
954 phys_addr_t total_size;
955 phys_addr_t max_addr;
956 phys_addr_t min_addr;
957
958 PREFIX_PUSH();
959 setup_memblock();
960
961 r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
962 r1.size = SMP_CACHE_BYTES;
963
964 r2.size = SZ_128;
965 r2.base = r1.base - (r2.size + gap_size);
966
967 total_size = r1.size + r2.size + r3_size;
968 min_addr = r2.base + r2.size;
969 max_addr = r1.base;
970
971 memblock_reserve(r1.base, r1.size);
972 memblock_reserve(r2.base, r2.size);
973
974 allocated_ptr = run_memblock_alloc_try_nid(r3_size, SMP_CACHE_BYTES,
975 min_addr, max_addr,
976 NUMA_NO_NODE);
977
978 ASSERT_NE(allocated_ptr, NULL);
979 assert_mem_content(allocated_ptr, r3_size, alloc_nid_test_flags);
980
981 ASSERT_EQ(rgn3->size, r3_size);
982 ASSERT_EQ(rgn3->base, memblock_start_of_DRAM());
983
984 ASSERT_EQ(rgn2->size, r2.size);
985 ASSERT_EQ(rgn2->base, r2.base);
986
987 ASSERT_EQ(rgn1->size, r1.size);
988 ASSERT_EQ(rgn1->base, r1.base);
989
990 ASSERT_EQ(memblock.reserved.cnt, 3);
991 ASSERT_EQ(memblock.reserved.total_size, total_size);
992
993 test_pass_pop();
994
995 return 0;
996 }
997
998 /*
999 * A test that tries to allocate a memory region, where max_addr is
1000 * bigger than the end address of the available memory. Expect to allocate
1001 * a region that starts at min_addr.
1002 */
1003 static int alloc_try_nid_bottom_up_cap_max_check(void)
1004 {
1005 struct memblock_region *rgn = &memblock.reserved.regions[0];
1006 void *allocated_ptr = NULL;
1007 phys_addr_t size = SZ_256;
1008 phys_addr_t min_addr;
1009 phys_addr_t max_addr;
1010
1011 PREFIX_PUSH();
1012 setup_memblock();
1013
1014 min_addr = memblock_start_of_DRAM() + SZ_1K;
1015 max_addr = memblock_end_of_DRAM() + SZ_256;
1016
1017 allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
1018 min_addr, max_addr,
1019 NUMA_NO_NODE);
1020
1021 ASSERT_NE(allocated_ptr, NULL);
1022 assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
1023
1024 ASSERT_EQ(rgn->size, size);
1025 ASSERT_EQ(rgn->base, min_addr);
1026
1027 ASSERT_EQ(memblock.reserved.cnt, 1);
1028 ASSERT_EQ(memblock.reserved.total_size, size);
1029
1030 test_pass_pop();
1031
1032 return 0;
1033 }
1034
1035 /*
1036 * A test that tries to allocate a memory region, where min_addr is
1037 * smaller than the start address of the available memory. Expect to allocate
1038 * a region at the beginning of the available memory.
1039 */
1040 static int alloc_try_nid_bottom_up_cap_min_check(void)
1041 {
1042 struct memblock_region *rgn = &memblock.reserved.regions[0];
1043 void *allocated_ptr = NULL;
1044 phys_addr_t size = SZ_1K;
1045 phys_addr_t min_addr;
1046 phys_addr_t max_addr;
1047
1048 PREFIX_PUSH();
1049 setup_memblock();
1050
1051 min_addr = memblock_start_of_DRAM();
1052 max_addr = memblock_end_of_DRAM() - SZ_256;
1053
1054 allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
1055 min_addr, max_addr,
1056 NUMA_NO_NODE);
1057
1058 ASSERT_NE(allocated_ptr, NULL);
1059 assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
1060
1061 ASSERT_EQ(rgn->size, size);
1062 ASSERT_EQ(rgn->base, memblock_start_of_DRAM());
1063
1064 ASSERT_EQ(memblock.reserved.cnt, 1);
1065 ASSERT_EQ(memblock.reserved.total_size, size);
1066
1067 test_pass_pop();
1068
1069 return 0;
1070 }
1071
1072 /* Test case wrappers for range tests */
1073 static int alloc_try_nid_simple_check(void)
1074 {
1075 test_print("\tRunning %s...\n", __func__);
1076 memblock_set_bottom_up(false);
1077 alloc_try_nid_top_down_simple_check();
1078 memblock_set_bottom_up(true);
1079 alloc_try_nid_bottom_up_simple_check();
1080
1081 return 0;
1082 }
1083
1084 static int alloc_try_nid_misaligned_check(void)
1085 {
1086 test_print("\tRunning %s...\n", __func__);
1087 memblock_set_bottom_up(false);
1088 alloc_try_nid_top_down_end_misaligned_check();
1089 memblock_set_bottom_up(true);
1090 alloc_try_nid_bottom_up_start_misaligned_check();
1091
1092 return 0;
1093 }
1094
1095 static int alloc_try_nid_narrow_range_check(void)
1096 {
1097 test_print("\tRunning %s...\n", __func__);
1098 memblock_set_bottom_up(false);
1099 alloc_try_nid_top_down_narrow_range_check();
1100 memblock_set_bottom_up(true);
1101 alloc_try_nid_bottom_up_narrow_range_check();
1102
1103 return 0;
1104 }
1105
1106 static int alloc_try_nid_reserved_with_space_check(void)
1107 {
1108 test_print("\tRunning %s...\n", __func__);
1109 memblock_set_bottom_up(false);
1110 alloc_try_nid_top_down_reserved_with_space_check();
1111 memblock_set_bottom_up(true);
1112 alloc_try_nid_bottom_up_reserved_with_space_check();
1113
1114 return 0;
1115 }
1116
1117 static int alloc_try_nid_reserved_no_space_check(void)
1118 {
1119 test_print("\tRunning %s...\n", __func__);
1120 memblock_set_bottom_up(false);
1121 alloc_try_nid_top_down_reserved_no_space_check();
1122 memblock_set_bottom_up(true);
1123 alloc_try_nid_bottom_up_reserved_no_space_check();
1124
1125 return 0;
1126 }
1127
1128 static int alloc_try_nid_cap_max_check(void)
1129 {
1130 test_print("\tRunning %s...\n", __func__);
1131 memblock_set_bottom_up(false);
1132 alloc_try_nid_top_down_cap_max_check();
1133 memblock_set_bottom_up(true);
1134 alloc_try_nid_bottom_up_cap_max_check();
1135
1136 return 0;
1137 }
1138
1139 static int alloc_try_nid_cap_min_check(void)
1140 {
1141 test_print("\tRunning %s...\n", __func__);
1142 memblock_set_bottom_up(false);
1143 alloc_try_nid_top_down_cap_min_check();
1144 memblock_set_bottom_up(true);
1145 alloc_try_nid_bottom_up_cap_min_check();
1146
1147 return 0;
1148 }
1149
1150 static int alloc_try_nid_min_reserved_check(void)
1151 {
1152 test_print("\tRunning %s...\n", __func__);
1153 run_top_down(alloc_try_nid_min_reserved_generic_check);
1154 run_bottom_up(alloc_try_nid_min_reserved_generic_check);
1155
1156 return 0;
1157 }
1158
1159 static int alloc_try_nid_max_reserved_check(void)
1160 {
1161 test_print("\tRunning %s...\n", __func__);
1162 run_top_down(alloc_try_nid_max_reserved_generic_check);
1163 run_bottom_up(alloc_try_nid_max_reserved_generic_check);
1164
1165 return 0;
1166 }
1167
1168 static int alloc_try_nid_exact_address_check(void)
1169 {
1170 test_print("\tRunning %s...\n", __func__);
1171 run_top_down(alloc_try_nid_exact_address_generic_check);
1172 run_bottom_up(alloc_try_nid_exact_address_generic_check);
1173
1174 return 0;
1175 }
1176
1177 static int alloc_try_nid_reserved_full_merge_check(void)
1178 {
1179 test_print("\tRunning %s...\n", __func__);
1180 run_top_down(alloc_try_nid_reserved_full_merge_generic_check);
1181 run_bottom_up(alloc_try_nid_reserved_full_merge_generic_check);
1182
1183 return 0;
1184 }
1185
1186 static int alloc_try_nid_reserved_all_check(void)
1187 {
1188 test_print("\tRunning %s...\n", __func__);
1189 run_top_down(alloc_try_nid_reserved_all_generic_check);
1190 run_bottom_up(alloc_try_nid_reserved_all_generic_check);
1191
1192 return 0;
1193 }
1194
1195 static int alloc_try_nid_low_max_check(void)
1196 {
1197 test_print("\tRunning %s...\n", __func__);
1198 run_top_down(alloc_try_nid_low_max_generic_check);
1199 run_bottom_up(alloc_try_nid_low_max_generic_check);
1200
1201 return 0;
1202 }
1203
1204 static int memblock_alloc_nid_range_checks(void)
1205 {
1206 test_print("Running %s range tests...\n",
1207 get_memblock_alloc_try_nid_name(alloc_nid_test_flags));
1208
1209 alloc_try_nid_simple_check();
1210 alloc_try_nid_misaligned_check();
1211 alloc_try_nid_narrow_range_check();
1212 alloc_try_nid_reserved_with_space_check();
1213 alloc_try_nid_reserved_no_space_check();
1214 alloc_try_nid_cap_max_check();
1215 alloc_try_nid_cap_min_check();
1216
1217 alloc_try_nid_min_reserved_check();
1218 alloc_try_nid_max_reserved_check();
1219 alloc_try_nid_exact_address_check();
1220 alloc_try_nid_reserved_full_merge_check();
1221 alloc_try_nid_reserved_all_check();
1222 alloc_try_nid_low_max_check();
1223
1224 return 0;
1225 }
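/*
 * The range checks above are normally driven twice by the top-level entry
 * point (not part of this excerpt): once with alloc_nid_test_flags left as
 * TEST_F_NONE and once with TEST_F_RAW set, so that both
 * memblock_alloc_try_nid() and memblock_alloc_try_nid_raw() are exercised
 * over the same scenarios.
 */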
1226
1227 /*
1228 * A test that tries to allocate a memory region in a specific NUMA node that
1229 * has enough memory to allocate a region of the requested size.
1230 * Expect to allocate an aligned region at the end of the requested node.
1231 */
1232 static int alloc_try_nid_top_down_numa_simple_check(void)
1233 {
1234 int nid_req = 3;
1235 struct memblock_region *new_rgn = &memblock.reserved.regions[0];
1236 struct memblock_region *req_node = &memblock.memory.regions[nid_req];
1237 void *allocated_ptr = NULL;
1238 phys_addr_t size;
1239 phys_addr_t min_addr;
1240 phys_addr_t max_addr;
1241
1242 PREFIX_PUSH();
1243 setup_numa_memblock(node_fractions);
1244
1245 ASSERT_LE(SZ_4, req_node->size);
1246 size = req_node->size / SZ_4;
1247 min_addr = memblock_start_of_DRAM();
1248 max_addr = memblock_end_of_DRAM();
1249
1250 allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
1251 min_addr, max_addr, nid_req);
1252
1253 ASSERT_NE(allocated_ptr, NULL);
1254 assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
1255
1256 ASSERT_EQ(new_rgn->size, size);
1257 ASSERT_EQ(new_rgn->base, region_end(req_node) - size);
1258 ASSERT_LE(req_node->base, new_rgn->base);
1259
1260 ASSERT_EQ(memblock.reserved.cnt, 1);
1261 ASSERT_EQ(memblock.reserved.total_size, size);
1262
1263 test_pass_pop();
1264
1265 return 0;
1266 }
1267
1268 /*
1269 * A test that tries to allocate a memory region in a specific NUMA node that
1270 * does not have enough memory to allocate a region of the requested size:
1271 *
1272 * | +-----+ +------------------+ |
1273 * | | req | | expected | |
1274 * +---+-----+----------+------------------+-----+
1275 *
1276 * | +---------+ |
1277 * | | rgn | |
1278 * +-----------------------------+---------+-----+
1279 *
1280 * Expect to allocate an aligned region at the end of the last node that has
1281 * enough memory (in this case, nid = 6) after falling back to NUMA_NO_NODE.
1282 */
1283 static int alloc_try_nid_top_down_numa_small_node_check(void)
1284 {
1285 int nid_req = 1;
1286 int nid_exp = 6;
1287 struct memblock_region *new_rgn = &memblock.reserved.regions[0];
1288 struct memblock_region *req_node = &memblock.memory.regions[nid_req];
1289 struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
1290 void *allocated_ptr = NULL;
1291 phys_addr_t size;
1292 phys_addr_t min_addr;
1293 phys_addr_t max_addr;
1294
1295 PREFIX_PUSH();
1296 setup_numa_memblock(node_fractions);
1297
1298 size = SZ_2 * req_node->size;
1299 min_addr = memblock_start_of_DRAM();
1300 max_addr = memblock_end_of_DRAM();
1301
1302 allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
1303 min_addr, max_addr, nid_req);
1304
1305 ASSERT_NE(allocated_ptr, NULL);
1306 assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
1307
1308 ASSERT_EQ(new_rgn->size, size);
1309 ASSERT_EQ(new_rgn->base, region_end(exp_node) - size);
1310 ASSERT_LE(exp_node->base, new_rgn->base);
1311
1312 ASSERT_EQ(memblock.reserved.cnt, 1);
1313 ASSERT_EQ(memblock.reserved.total_size, size);
1314
1315 test_pass_pop();
1316
1317 return 0;
1318 }
1319
1320 /*
1321 * A test that tries to allocate a memory region in a specific NUMA node that
1322 * is fully reserved:
1323 *
1324 * | +---------+ +------------------+ |
1325 * | |requested| | expected | |
1326 * +--------------+---------+------------+------------------+-----+
1327 *
1328 * | +---------+ +---------+ |
1329 * | | reserved| | new | |
1330 * +--------------+---------+---------------------+---------+-----+
1331 *
1332 * Expect to allocate an aligned region at the end of the last node that is
1333 * large enough and has enough unreserved memory (in this case, nid = 6) after
1334 * falling back to NUMA_NO_NODE. The region count and total size get updated.
1335 */
1336 static int alloc_try_nid_top_down_numa_node_reserved_check(void)
1337 {
1338 int nid_req = 2;
1339 int nid_exp = 6;
1340 struct memblock_region *new_rgn = &memblock.reserved.regions[1];
1341 struct memblock_region *req_node = &memblock.memory.regions[nid_req];
1342 struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
1343 void *allocated_ptr = NULL;
1344 phys_addr_t size;
1345 phys_addr_t min_addr;
1346 phys_addr_t max_addr;
1347
1348 PREFIX_PUSH();
1349 setup_numa_memblock(node_fractions);
1350
1351 size = req_node->size;
1352 min_addr = memblock_start_of_DRAM();
1353 max_addr = memblock_end_of_DRAM();
1354
1355 memblock_reserve(req_node->base, req_node->size);
1356 allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
1357 min_addr, max_addr, nid_req);
1358
1359 ASSERT_NE(allocated_ptr, NULL);
1360 assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
1361
1362 ASSERT_EQ(new_rgn->size, size);
1363 ASSERT_EQ(new_rgn->base, region_end(exp_node) - size);
1364 ASSERT_LE(exp_node->base, new_rgn->base);
1365
1366 ASSERT_EQ(memblock.reserved.cnt, 2);
1367 ASSERT_EQ(memblock.reserved.total_size, size + req_node->size);
1368
1369 test_pass_pop();
1370
1371 return 0;
1372 }
1373
1374 /*
1375 * A test that tries to allocate a memory region in a specific NUMA node that
1376 * is partially reserved but has enough memory for the allocated region:
1377 *
1378 * | +---------------------------------------+ |
1379 * | | requested | |
1380 * +-----------+---------------------------------------+----------+
1381 *
1382 * | +------------------+ +-----+ |
1383 * | | reserved | | new | |
1384 * +-----------+------------------+--------------+-----+----------+
1385 *
1386 * Expect to allocate an aligned region at the end of the requested node. The
1387 * region count and total size get updated.
1388 */
1389 static int alloc_try_nid_top_down_numa_part_reserved_check(void)
1390 {
1391 int nid_req = 4;
1392 struct memblock_region *new_rgn = &memblock.reserved.regions[1];
1393 struct memblock_region *req_node = &memblock.memory.regions[nid_req];
1394 void *allocated_ptr = NULL;
1395 struct region r1;
1396 phys_addr_t size;
1397 phys_addr_t min_addr;
1398 phys_addr_t max_addr;
1399
1400 PREFIX_PUSH();
1401 setup_numa_memblock(node_fractions);
1402
1403 ASSERT_LE(SZ_8, req_node->size);
1404 r1.base = req_node->base;
1405 r1.size = req_node->size / SZ_2;
1406 size = r1.size / SZ_4;
1407 min_addr = memblock_start_of_DRAM();
1408 max_addr = memblock_end_of_DRAM();
1409
1410 memblock_reserve(r1.base, r1.size);
1411 allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
1412 min_addr, max_addr, nid_req);
1413
1414 ASSERT_NE(allocated_ptr, NULL);
1415 assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
1416
1417 ASSERT_EQ(new_rgn->size, size);
1418 ASSERT_EQ(new_rgn->base, region_end(req_node) - size);
1419 ASSERT_LE(req_node->base, new_rgn->base);
1420
1421 ASSERT_EQ(memblock.reserved.cnt, 2);
1422 ASSERT_EQ(memblock.reserved.total_size, size + r1.size);
1423
1424 test_pass_pop();
1425
1426 return 0;
1427 }
1428
1429 /*
1430 * A test that tries to allocate a memory region in a specific NUMA node that
1431 * is partially reserved and does not have enough contiguous memory for the
1432 * allocated region:
1433 *
1434 * | +-----------------------+ +----------------------|
1435 * | | requested | | expected |
1436 * +-----------+-----------------------+---------+----------------------+
1437 *
1438 * | +----------+ +-----------|
1439 * | | reserved | | new |
1440 * +-----------------+----------+---------------------------+-----------+
1441 *
1442 * Expect to allocate an aligned region at the end of the last node that is
1443 * large enough and has enough unreserved memory (in this case,
1444 * nid = NUMA_NODES - 1) after falling back to NUMA_NO_NODE. The region count
1445 * and total size get updated.
1446 */
1447 static int alloc_try_nid_top_down_numa_part_reserved_fallback_check(void)
1448 {
1449 int nid_req = 4;
1450 int nid_exp = NUMA_NODES - 1;
1451 struct memblock_region *new_rgn = &memblock.reserved.regions[1];
1452 struct memblock_region *req_node = &memblock.memory.regions[nid_req];
1453 struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
1454 void *allocated_ptr = NULL;
1455 struct region r1;
1456 phys_addr_t size;
1457 phys_addr_t min_addr;
1458 phys_addr_t max_addr;
1459
1460 PREFIX_PUSH();
1461 setup_numa_memblock(node_fractions);
1462
1463 ASSERT_LE(SZ_4, req_node->size);
1464 size = req_node->size / SZ_2;
1465 r1.base = req_node->base + (size / SZ_2);
1466 r1.size = size;
1467
1468 min_addr = memblock_start_of_DRAM();
1469 max_addr = memblock_end_of_DRAM();
1470
1471 memblock_reserve(r1.base, r1.size);
1472 allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
1473 min_addr, max_addr, nid_req);
1474
1475 ASSERT_NE(allocated_ptr, NULL);
1476 assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
1477
1478 ASSERT_EQ(new_rgn->size, size);
1479 ASSERT_EQ(new_rgn->base, region_end(exp_node) - size);
1480 ASSERT_LE(exp_node->base, new_rgn->base);
1481
1482 ASSERT_EQ(memblock.reserved.cnt, 2);
1483 ASSERT_EQ(memblock.reserved.total_size, size + r1.size);
1484
1485 test_pass_pop();
1486
1487 return 0;
1488 }
1489
1490 /*
1491 * A test that tries to allocate a memory region that spans over the min_addr
1492 * and max_addr range and overlaps with two different nodes, where the first
1493 * node is the requested node:
1494 *
1495 * min_addr
1496 * | max_addr
1497 * | |
1498 * v v
1499 * | +-----------------------+-----------+ |
1500 * | | requested | node3 | |
1501 * +-----------+-----------------------+-----------+--------------+
1502 * + +
1503 * | +-----------+ |
1504 * | | rgn | |
1505 * +-----------------------+-----------+--------------------------+
1506 *
1507 * Expect to drop the lower limit and allocate a memory region that ends at
1508 * the end of the requested node.
1509 */
1510 static int alloc_try_nid_top_down_numa_split_range_low_check(void)
1511 {
1512 int nid_req = 2;
1513 struct memblock_region *new_rgn = &memblock.reserved.regions[0];
1514 struct memblock_region *req_node = &memblock.memory.regions[nid_req];
1515 void *allocated_ptr = NULL;
1516 phys_addr_t size = SZ_512;
1517 phys_addr_t min_addr;
1518 phys_addr_t max_addr;
1519 phys_addr_t req_node_end;
1520
1521 PREFIX_PUSH();
1522 setup_numa_memblock(node_fractions);
1523
1524 req_node_end = region_end(req_node);
1525 min_addr = req_node_end - SZ_256;
1526 max_addr = min_addr + size;
1527
1528 allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
1529 min_addr, max_addr, nid_req);
1530
1531 ASSERT_NE(allocated_ptr, NULL);
1532 assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
1533
1534 ASSERT_EQ(new_rgn->size, size);
1535 ASSERT_EQ(new_rgn->base, req_node_end - size);
1536 ASSERT_LE(req_node->base, new_rgn->base);
1537
1538 ASSERT_EQ(memblock.reserved.cnt, 1);
1539 ASSERT_EQ(memblock.reserved.total_size, size);
1540
1541 test_pass_pop();
1542
1543 return 0;
1544 }
1545
1546 /*
1547 * A test that tries to allocate a memory region that spans over the min_addr
1548 * and max_addr range and overlaps with two different nodes, where the second
1549 * node is the requested node:
1550 *
1551 * min_addr
1552 * | max_addr
1553 * | |
1554 * v v
1555 * | +--------------------------+---------+ |
1556 * | | expected |requested| |
1557 * +------+--------------------------+---------+----------------+
1558 * + +
1559 * | +---------+ |
1560 * | | rgn | |
1561 * +-----------------------+---------+--------------------------+
1562 *
1563 * Expect to drop the lower limit and allocate a memory region that
1564 * ends at the end of the first node that overlaps with the range.
1565 */
1566 static int alloc_try_nid_top_down_numa_split_range_high_check(void)
1567 {
1568 int nid_req = 3;
1569 int nid_exp = nid_req - 1;
1570 struct memblock_region *new_rgn = &memblock.reserved.regions[0];
1571 struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
1572 void *allocated_ptr = NULL;
1573 phys_addr_t size = SZ_512;
1574 phys_addr_t min_addr;
1575 phys_addr_t max_addr;
1576 phys_addr_t exp_node_end;
1577
1578 PREFIX_PUSH();
1579 setup_numa_memblock(node_fractions);
1580
1581 exp_node_end = region_end(exp_node);
1582 min_addr = exp_node_end - SZ_256;
1583 max_addr = min_addr + size;
1584
1585 allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
1586 min_addr, max_addr, nid_req);
1587
1588 ASSERT_NE(allocated_ptr, NULL);
1589 assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
1590
1591 ASSERT_EQ(new_rgn->size, size);
1592 ASSERT_EQ(new_rgn->base, exp_node_end - size);
1593 ASSERT_LE(exp_node->base, new_rgn->base);
1594
1595 ASSERT_EQ(memblock.reserved.cnt, 1);
1596 ASSERT_EQ(memblock.reserved.total_size, size);
1597
1598 test_pass_pop();
1599
1600 return 0;
1601 }
1602
1603 /*
1604 * A test that tries to allocate a memory region that spans over the min_addr
1605 * and max_addr range and overlaps with two different nodes, where the requested
1606 * node ends before min_addr:
1607 *
1608 * min_addr
1609 * | max_addr
1610 * | |
1611 * v v
1612 * | +---------------+ +-------------+---------+ |
1613 * | | requested | | node1 | node2 | |
1614 * +----+---------------+--------+-------------+---------+----------+
1615 * + +
1616 * | +---------+ |
1617 * | | rgn | |
1618 * +----------+---------+-------------------------------------------+
1619 *
1620 * Expect to drop the lower limit and allocate a memory region that ends at
1621 * the end of the requested node.
1622 */
1623 static int alloc_try_nid_top_down_numa_no_overlap_split_check(void)
1624 {
1625 int nid_req = 2;
1626 struct memblock_region *new_rgn = &memblock.reserved.regions[0];
1627 struct memblock_region *req_node = &memblock.memory.regions[nid_req];
1628 struct memblock_region *node2 = &memblock.memory.regions[6];
1629 void *allocated_ptr = NULL;
1630 phys_addr_t size;
1631 phys_addr_t min_addr;
1632 phys_addr_t max_addr;
1633
1634 PREFIX_PUSH();
1635 setup_numa_memblock(node_fractions);
1636
1637 size = SZ_512;
1638 min_addr = node2->base - SZ_256;
1639 max_addr = min_addr + size;
1640
1641 allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
1642 min_addr, max_addr, nid_req);
1643
1644 ASSERT_NE(allocated_ptr, NULL);
1645 assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
1646
1647 ASSERT_EQ(new_rgn->size, size);
1648 ASSERT_EQ(new_rgn->base, region_end(req_node) - size);
1649 ASSERT_LE(req_node->base, new_rgn->base);
1650
1651 ASSERT_EQ(memblock.reserved.cnt, 1);
1652 ASSERT_EQ(memblock.reserved.total_size, size);
1653
1654 test_pass_pop();
1655
1656 return 0;
1657 }
1658
1659 /*
1660 * A test that tries to allocate memory within min_addr and max_addr range when
1661 * the requested node and the range do not overlap, and the requested node ends
1662 * before min_addr. The range overlaps with multiple nodes along node
1663 * boundaries:
1664 *
1665 * min_addr
1666 * | max_addr
1667 * | |
1668 * v v
1669 * |-----------+ +----------+----...----+----------+ |
1670 * | requested | | min node | ... | max node | |
1671 * +-----------+-----------+----------+----...----+----------+------+
1672 * + +
1673 * | +-----+ |
1674 * | | rgn | |
1675 * +---------------------------------------------------+-----+------+
1676 *
1677 * Expect to allocate a memory region at the end of the final node in
1678 * the range after falling back to NUMA_NO_NODE.
1679 */
1680 static int alloc_try_nid_top_down_numa_no_overlap_low_check(void)
1681 {
1682 int nid_req = 0;
1683 struct memblock_region *new_rgn = &memblock.reserved.regions[0];
1684 struct memblock_region *min_node = &memblock.memory.regions[2];
1685 struct memblock_region *max_node = &memblock.memory.regions[5];
1686 void *allocated_ptr = NULL;
1687 phys_addr_t size = SZ_64;
1688 phys_addr_t max_addr;
1689 phys_addr_t min_addr;
1690
1691 PREFIX_PUSH();
1692 setup_numa_memblock(node_fractions);
1693
1694 min_addr = min_node->base;
1695 max_addr = region_end(max_node);
1696
1697 allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
1698 min_addr, max_addr, nid_req);
1699
1700 ASSERT_NE(allocated_ptr, NULL);
1701 assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
1702
1703 ASSERT_EQ(new_rgn->size, size);
1704 ASSERT_EQ(new_rgn->base, max_addr - size);
1705 ASSERT_LE(max_node->base, new_rgn->base);
1706
1707 ASSERT_EQ(memblock.reserved.cnt, 1);
1708 ASSERT_EQ(memblock.reserved.total_size, size);
1709
1710 test_pass_pop();
1711
1712 return 0;
1713 }
1714
1715 /*
1716 * A test that tries to allocate memory within min_addr and max_addr range when
1717 * the requested node and the range do not overlap, and the requested node starts
1718 * after max_addr. The range overlaps with multiple nodes along node
1719 * boundaries:
1720 *
1721 * min_addr
1722 * | max_addr
1723 * | |
1724 * v v
1725 * | +----------+----...----+----------+ +-----------+ |
1726 * | | min node | ... | max node | | requested | |
1727 * +-----+----------+----...----+----------+--------+-----------+---+
1728 * + +
1729 * | +-----+ |
1730 * | | rgn | |
1731 * +---------------------------------+-----+------------------------+
1732 *
1733 * Expect to allocate a memory region at the end of the final node in
1734 * the range after falling back to NUMA_NO_NODE.
1735 */
1736 static int alloc_try_nid_top_down_numa_no_overlap_high_check(void)
1737 {
1738 int nid_req = 7;
1739 struct memblock_region *new_rgn = &memblock.reserved.regions[0];
1740 struct memblock_region *min_node = &memblock.memory.regions[2];
1741 struct memblock_region *max_node = &memblock.memory.regions[5];
1742 void *allocated_ptr = NULL;
1743 phys_addr_t size = SZ_64;
1744 phys_addr_t max_addr;
1745 phys_addr_t min_addr;
1746
1747 PREFIX_PUSH();
1748 setup_numa_memblock(node_fractions);
1749
1750 min_addr = min_node->base;
1751 max_addr = region_end(max_node);
1752
1753 allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
1754 min_addr, max_addr, nid_req);
1755
1756 ASSERT_NE(allocated_ptr, NULL);
1757 assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
1758
1759 ASSERT_EQ(new_rgn->size, size);
1760 ASSERT_EQ(new_rgn->base, max_addr - size);
1761 ASSERT_LE(max_node->base, new_rgn->base);
1762
1763 ASSERT_EQ(memblock.reserved.cnt, 1);
1764 ASSERT_EQ(memblock.reserved.total_size, size);
1765
1766 test_pass_pop();
1767
1768 return 0;
1769 }
1770
1771 /*
1772 * A test that tries to allocate a memory region in a specific NUMA node that
1773 * has enough memory to allocate a region of the requested size.
1774 * Expect to allocate an aligned region at the beginning of the requested node.
1775 */
1776 static int alloc_try_nid_bottom_up_numa_simple_check(void)
1777 {
1778 int nid_req = 3;
1779 struct memblock_region *new_rgn = &memblock.reserved.regions[0];
1780 struct memblock_region *req_node = &memblock.memory.regions[nid_req];
1781 void *allocated_ptr = NULL;
1782 phys_addr_t size;
1783 phys_addr_t min_addr;
1784 phys_addr_t max_addr;
1785
1786 PREFIX_PUSH();
1787 setup_numa_memblock(node_fractions);
1788
1789 ASSERT_LE(SZ_4, req_node->size);
1790 size = req_node->size / SZ_4;
1791 min_addr = memblock_start_of_DRAM();
1792 max_addr = memblock_end_of_DRAM();
1793
1794 allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
1795 min_addr, max_addr, nid_req);
1796
1797 ASSERT_NE(allocated_ptr, NULL);
1798 assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
1799
1800 ASSERT_EQ(new_rgn->size, size);
1801 ASSERT_EQ(new_rgn->base, req_node->base);
1802 ASSERT_LE(region_end(new_rgn), region_end(req_node));
1803
1804 ASSERT_EQ(memblock.reserved.cnt, 1);
1805 ASSERT_EQ(memblock.reserved.total_size, size);
1806
1807 test_pass_pop();
1808
1809 return 0;
1810 }
1811
1812 /*
1813 * A test that tries to allocate a memory region in a specific NUMA node that
1814 * does not have enough memory to allocate a region of the requested size:
1815 *
1816 * |----------------------+-----+ |
1817 * | expected | req | |
1818 * +----------------------+-----+----------------+
1819 *
1820 * |---------+ |
1821 * | rgn | |
1822 * +---------+-----------------------------------+
1823 *
1824 * Expect to allocate an aligned region at the beginning of the first node that
1825 * has enough memory (in this case, nid = 0) after falling back to NUMA_NO_NODE.
1826 */
1827 static int alloc_try_nid_bottom_up_numa_small_node_check(void)
1828 {
1829 int nid_req = 1;
1830 int nid_exp = 0;
1831 struct memblock_region *new_rgn = &memblock.reserved.regions[0];
1832 struct memblock_region *req_node = &memblock.memory.regions[nid_req];
1833 struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
1834 void *allocated_ptr = NULL;
1835 phys_addr_t size;
1836 phys_addr_t min_addr;
1837 phys_addr_t max_addr;
1838
1839 PREFIX_PUSH();
1840 setup_numa_memblock(node_fractions);
1841
1842 size = SZ_2 * req_node->size;
1843 min_addr = memblock_start_of_DRAM();
1844 max_addr = memblock_end_of_DRAM();
1845
1846 allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
1847 min_addr, max_addr, nid_req);
1848
1849 ASSERT_NE(allocated_ptr, NULL);
1850 assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
1851
1852 ASSERT_EQ(new_rgn->size, size);
1853 ASSERT_EQ(new_rgn->base, exp_node->base);
1854 ASSERT_LE(region_end(new_rgn), region_end(exp_node));
1855
1856 ASSERT_EQ(memblock.reserved.cnt, 1);
1857 ASSERT_EQ(memblock.reserved.total_size, size);
1858
1859 test_pass_pop();
1860
1861 return 0;
1862 }
1863
1864 /*
1865 * A test that tries to allocate a memory region in a specific NUMA node that
1866 * is fully reserved:
1867 *
1868 * |----------------------+ +-----------+ |
1869 * | expected | | requested | |
1870 * +----------------------+-----+-----------+--------------------+
1871 *
1872 * |-----------+ +-----------+ |
1873 * | new | | reserved | |
1874 * +-----------+----------------+-----------+--------------------+
1875 *
1876 * Expect to allocate an aligned region at the beginning of the first node that
1877 * is large enough and has enough unreserved memory (in this case, nid = 0)
1878 * after falling back to NUMA_NO_NODE. The region count and total size get
1879 * updated.
1880 */
1881 static int alloc_try_nid_bottom_up_numa_node_reserved_check(void)
1882 {
1883 int nid_req = 2;
1884 int nid_exp = 0;
1885 struct memblock_region *new_rgn = &memblock.reserved.regions[0];
1886 struct memblock_region *req_node = &memblock.memory.regions[nid_req];
1887 struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
1888 void *allocated_ptr = NULL;
1889 phys_addr_t size;
1890 phys_addr_t min_addr;
1891 phys_addr_t max_addr;
1892
1893 PREFIX_PUSH();
1894 setup_numa_memblock(node_fractions);
1895
1896 size = req_node->size;
1897 min_addr = memblock_start_of_DRAM();
1898 max_addr = memblock_end_of_DRAM();
1899
1900 memblock_reserve(req_node->base, req_node->size);
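	/*
	 * The requested node is fully reserved above, so the allocation falls
	 * back to node 0. The new region is not adjacent to that reservation,
	 * hence two reserved regions are expected below.
	 */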
1901 allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
1902 min_addr, max_addr, nid_req);
1903
1904 ASSERT_NE(allocated_ptr, NULL);
1905 assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
1906
1907 ASSERT_EQ(new_rgn->size, size);
1908 ASSERT_EQ(new_rgn->base, exp_node->base);
1909 ASSERT_LE(region_end(new_rgn), region_end(exp_node));
1910
1911 ASSERT_EQ(memblock.reserved.cnt, 2);
1912 ASSERT_EQ(memblock.reserved.total_size, size + req_node->size);
1913
1914 test_pass_pop();
1915
1916 return 0;
1917 }
1918
1919 /*
1920 * A test that tries to allocate a memory region in a specific NUMA node that
1921 * is partially reserved but has enough memory for the allocated region:
1922 *
1923 * | +---------------------------------------+ |
1924 * | | requested | |
1925 * +-----------+---------------------------------------+---------+
1926 *
1927 * | +------------------+-----+ |
1928 * | | reserved | new | |
1929 * +-----------+------------------+-----+------------------------+
1930 *
1931 * Expect to allocate an aligned region in the requested node that merges with
1932 * the existing reserved region. The total size gets updated.
1933 */
1934 static int alloc_try_nid_bottom_up_numa_part_reserved_check(void)
1935 {
1936 int nid_req = 4;
1937 struct memblock_region *new_rgn = &memblock.reserved.regions[0];
1938 struct memblock_region *req_node = &memblock.memory.regions[nid_req];
1939 void *allocated_ptr = NULL;
1940 struct region r1;
1941 phys_addr_t size;
1942 phys_addr_t min_addr;
1943 phys_addr_t max_addr;
1944 phys_addr_t total_size;
1945
1946 PREFIX_PUSH();
1947 setup_numa_memblock(node_fractions);
1948
1949 ASSERT_LE(SZ_8, req_node->size);
1950 r1.base = req_node->base;
1951 r1.size = req_node->size / SZ_2;
1952 size = r1.size / SZ_4;
1953 min_addr = memblock_start_of_DRAM();
1954 max_addr = memblock_end_of_DRAM();
1955 total_size = size + r1.size;
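	/*
	 * Bottom-up, the allocation starts right at the end of r1 inside the
	 * requested node, so memblock is expected to merge it with r1 into a
	 * single reserved region of total_size.
	 */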
1956
1957 memblock_reserve(r1.base, r1.size);
1958 allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
1959 min_addr, max_addr, nid_req);
1960
1961 ASSERT_NE(allocated_ptr, NULL);
1962 assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
1963
1964 ASSERT_EQ(new_rgn->size, total_size);
1965 ASSERT_EQ(new_rgn->base, req_node->base);
1966 ASSERT_LE(region_end(new_rgn), region_end(req_node));
1967
1968 ASSERT_EQ(memblock.reserved.cnt, 1);
1969 ASSERT_EQ(memblock.reserved.total_size, total_size);
1970
1971 test_pass_pop();
1972
1973 return 0;
1974 }
1975
1976 /*
1977 * A test that tries to allocate a memory region in a specific NUMA node that
1978 * is partially reserved and does not have enough contiguous memory for the
1979 * allocated region:
1980 *
1981 * |----------------------+ +-----------------------+ |
1982 * | expected | | requested | |
1983 * +----------------------+-------+-----------------------+---------+
1984 *
1985 * |-----------+ +----------+ |
1986 * | new | | reserved | |
1987 * +-----------+------------------------+----------+----------------+
1988 *
1989 * Expect to allocate an aligned region at the beginning of the first
1990 * node that is large enough and has enough unreserved memory (in this case,
1991 * nid = 0) after falling back to NUMA_NO_NODE. The region count and total size
1992 * get updated.
1993 */
1994 static int alloc_try_nid_bottom_up_numa_part_reserved_fallback_check(void)
1995 {
1996 int nid_req = 4;
1997 int nid_exp = 0;
1998 struct memblock_region *new_rgn = &memblock.reserved.regions[0];
1999 struct memblock_region *req_node = &memblock.memory.regions[nid_req];
2000 struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
2001 void *allocated_ptr = NULL;
2002 struct region r1;
2003 phys_addr_t size;
2004 phys_addr_t min_addr;
2005 phys_addr_t max_addr;
2006
2007 PREFIX_PUSH();
2008 setup_numa_memblock(node_fractions);
2009
2010 ASSERT_LE(SZ_4, req_node->size);
2011 size = req_node->size / SZ_2;
2012 r1.base = req_node->base + (size / SZ_2);
2013 r1.size = size;
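	/*
	 * r1 sits in the middle of the requested node, leaving only size / 2
	 * free on either side, so the node cannot hold the region and the
	 * allocation is expected to fall back to node 0.
	 */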
2014
2015 min_addr = memblock_start_of_DRAM();
2016 max_addr = memblock_end_of_DRAM();
2017
2018 memblock_reserve(r1.base, r1.size);
2019 allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
2020 min_addr, max_addr, nid_req);
2021
2022 ASSERT_NE(allocated_ptr, NULL);
2023 assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
2024
2025 ASSERT_EQ(new_rgn->size, size);
2026 ASSERT_EQ(new_rgn->base, exp_node->base);
2027 ASSERT_LE(region_end(new_rgn), region_end(exp_node));
2028
2029 ASSERT_EQ(memblock.reserved.cnt, 2);
2030 ASSERT_EQ(memblock.reserved.total_size, size + r1.size);
2031
2032 test_pass_pop();
2033
2034 return 0;
2035 }
2036
2037 /*
2038 * A test that tries to allocate a memory region that spans over the min_addr
2039 * and max_addr range and overlaps with two different nodes, where the first
2040 * node is the requested node:
2041 *
2042 * min_addr
2043 * | max_addr
2044 * | |
2045 * v v
2046 * | +-----------------------+-----------+ |
2047 * | | requested | node3 | |
2048 * +-----------+-----------------------+-----------+--------------+
2049 * + +
2050 * | +-----------+ |
2051 * | | rgn | |
2052 * +-----------+-----------+--------------------------------------+
2053 *
2054 * Expect to drop the lower limit and allocate a memory region at the beginning
2055 * of the requested node.
2056 */
2057 static int alloc_try_nid_bottom_up_numa_split_range_low_check(void)
2058 {
2059 int nid_req = 2;
2060 struct memblock_region *new_rgn = &memblock.reserved.regions[0];
2061 struct memblock_region *req_node = &memblock.memory.regions[nid_req];
2062 void *allocated_ptr = NULL;
2063 phys_addr_t size = SZ_512;
2064 phys_addr_t min_addr;
2065 phys_addr_t max_addr;
2066 phys_addr_t req_node_end;
2067
2068 PREFIX_PUSH();
2069 setup_numa_memblock(node_fractions);
2070
2071 req_node_end = region_end(req_node);
2072 min_addr = req_node_end - SZ_256;
2073 max_addr = min_addr + size;
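	/*
	 * Only SZ_256 of the range lies in the requested node (and SZ_256 in
	 * node 3), so the SZ_512 region cannot be placed within the range;
	 * memblock drops the lower limit and allocates bottom-up at
	 * req_node->base.
	 */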
2074
2075 allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
2076 min_addr, max_addr, nid_req);
2077
2078 ASSERT_NE(allocated_ptr, NULL);
2079 assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
2080
2081 ASSERT_EQ(new_rgn->size, size);
2082 ASSERT_EQ(new_rgn->base, req_node->base);
2083 ASSERT_LE(region_end(new_rgn), req_node_end);
2084
2085 ASSERT_EQ(memblock.reserved.cnt, 1);
2086 ASSERT_EQ(memblock.reserved.total_size, size);
2087
2088 test_pass_pop();
2089
2090 return 0;
2091 }
2092
2093 /*
2094 * A test that tries to allocate a memory region that spans over the min_addr
2095 * and max_addr range and overlaps with two different nodes, where the second
2096 * node is the requested node:
2097 *
2098 * min_addr
2099 * | max_addr
2100 * | |
2101 * v v
2102 * |------------------+ +----------------------+---------+ |
2103 * | expected | | previous |requested| |
2104 * +------------------+--------+----------------------+---------+------+
2105 * + +
2106 * |---------+ |
2107 * | rgn | |
2108 * +---------+---------------------------------------------------------+
2109 *
2110 * Expect to drop the lower limit and allocate a memory region at the beginning
2111 * of the first node that has enough memory.
2112 */
2113 static int alloc_try_nid_bottom_up_numa_split_range_high_check(void)
2114 {
2115 int nid_req = 3;
2116 int nid_exp = 0;
2117 struct memblock_region *new_rgn = &memblock.reserved.regions[0];
2118 struct memblock_region *req_node = &memblock.memory.regions[nid_req];
2119 struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
2120 void *allocated_ptr = NULL;
2121 phys_addr_t size = SZ_512;
2122 phys_addr_t min_addr;
2123 phys_addr_t max_addr;
2124 phys_addr_t exp_node_end;
2125
2126 PREFIX_PUSH();
2127 setup_numa_memblock(node_fractions);
2128
2129 	exp_node_end = region_end(exp_node);
2130 min_addr = req_node->base - SZ_256;
2131 max_addr = min_addr + size;
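	/*
	 * The range straddles the boundary between node 2 and the requested
	 * node, with only SZ_256 on each side, so the lower limit is dropped
	 * and the bottom-up allocation lands in node 0 instead.
	 */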
2132
2133 allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
2134 min_addr, max_addr, nid_req);
2135
2136 ASSERT_NE(allocated_ptr, NULL);
2137 assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
2138
2139 ASSERT_EQ(new_rgn->size, size);
2140 ASSERT_EQ(new_rgn->base, exp_node->base);
2141 ASSERT_LE(region_end(new_rgn), exp_node_end);
2142
2143 ASSERT_EQ(memblock.reserved.cnt, 1);
2144 ASSERT_EQ(memblock.reserved.total_size, size);
2145
2146 test_pass_pop();
2147
2148 return 0;
2149 }
2150
2151 /*
2152 * A test that tries to allocate a memory region that spans over the min_addr
2153 * and max_addr range and overlaps with two different nodes, where the requested
2154 * node ends before min_addr:
2155 *
2156 * min_addr
2157 * | max_addr
2158 * | |
2159 * v v
2160 * | +---------------+ +-------------+---------+ |
2161 * | | requested | | node1 | node2 | |
2162 * +----+---------------+--------+-------------+---------+---------+
2163 * + +
2164 * | +---------+ |
2165 * | | rgn | |
2166 * +----+---------+------------------------------------------------+
2167 *
2168 * Expect to drop the lower limit and allocate a memory region that starts at
2169 * the beginning of the requested node.
2170 */
2171 static int alloc_try_nid_bottom_up_numa_no_overlap_split_check(void)
2172 {
2173 int nid_req = 2;
2174 struct memblock_region *new_rgn = &memblock.reserved.regions[0];
2175 struct memblock_region *req_node = &memblock.memory.regions[nid_req];
2176 struct memblock_region *node2 = &memblock.memory.regions[6];
2177 void *allocated_ptr = NULL;
2178 phys_addr_t size;
2179 phys_addr_t min_addr;
2180 phys_addr_t max_addr;
2181
2182 PREFIX_PUSH();
2183 setup_numa_memblock(node_fractions);
2184
2185 size = SZ_512;
2186 min_addr = node2->base - SZ_256;
2187 max_addr = min_addr + size;
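	/*
	 * The requested node (node 2) ends below min_addr, so the node and
	 * the range do not overlap at all; memblock drops the lower limit and
	 * the bottom-up allocation starts at req_node->base.
	 */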
2188
2189 allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
2190 min_addr, max_addr, nid_req);
2191
2192 ASSERT_NE(allocated_ptr, NULL);
2193 assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
2194
2195 ASSERT_EQ(new_rgn->size, size);
2196 ASSERT_EQ(new_rgn->base, req_node->base);
2197 ASSERT_LE(region_end(new_rgn), region_end(req_node));
2198
2199 ASSERT_EQ(memblock.reserved.cnt, 1);
2200 ASSERT_EQ(memblock.reserved.total_size, size);
2201
2202 test_pass_pop();
2203
2204 return 0;
2205 }
2206
2207 /*
2208  * A test that tries to allocate memory within min_addr and max_addr range when
2209  * the requested node and the range do not overlap, and the requested node ends
2210 * before min_addr. The range overlaps with multiple nodes along node
2211 * boundaries:
2212 *
2213 * min_addr
2214 * | max_addr
2215 * | |
2216 * v v
2217 * |-----------+ +----------+----...----+----------+ |
2218 * | requested | | min node | ... | max node | |
2219 * +-----------+-----------+----------+----...----+----------+------+
2220 * + +
2221 * | +-----+ |
2222 * | | rgn | |
2223 * +-----------------------+-----+----------------------------------+
2224 *
2225 * Expect to allocate a memory region at the beginning of the first node
2226 * in the range after falling back to NUMA_NO_NODE.
2227 */
2228 static int alloc_try_nid_bottom_up_numa_no_overlap_low_check(void)
2229 {
2230 int nid_req = 0;
2231 struct memblock_region *new_rgn = &memblock.reserved.regions[0];
2232 struct memblock_region *min_node = &memblock.memory.regions[2];
2233 struct memblock_region *max_node = &memblock.memory.regions[5];
2234 void *allocated_ptr = NULL;
2235 phys_addr_t size = SZ_64;
2236 phys_addr_t max_addr;
2237 phys_addr_t min_addr;
2238
2239 PREFIX_PUSH();
2240 setup_numa_memblock(node_fractions);
2241
2242 min_addr = min_node->base;
2243 max_addr = region_end(max_node);
2244
2245 allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
2246 min_addr, max_addr, nid_req);
2247
2248 ASSERT_NE(allocated_ptr, NULL);
2249 assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
2250
2251 ASSERT_EQ(new_rgn->size, size);
2252 ASSERT_EQ(new_rgn->base, min_addr);
2253 ASSERT_LE(region_end(new_rgn), region_end(min_node));
2254
2255 ASSERT_EQ(memblock.reserved.cnt, 1);
2256 ASSERT_EQ(memblock.reserved.total_size, size);
2257
2258 test_pass_pop();
2259
2260 return 0;
2261 }
2262
2263 /*
2264  * A test that tries to allocate memory within min_addr and max_addr range when
2265  * the requested node and the range do not overlap, and the requested node starts
2266 * after max_addr. The range overlaps with multiple nodes along node
2267 * boundaries:
2268 *
2269 * min_addr
2270 * | max_addr
2271 * | |
2272 * v v
2273 * | +----------+----...----+----------+ +---------+ |
2274 * | | min node | ... | max node | |requested| |
2275 * +-----+----------+----...----+----------+---------+---------+---+
2276 * + +
2277 * | +-----+ |
2278 * | | rgn | |
2279 * +-----+-----+---------------------------------------------------+
2280 *
2281 * Expect to allocate a memory region at the beginning of the first node
2282 * in the range after falling back to NUMA_NO_NODE.
2283 */
2284 static int alloc_try_nid_bottom_up_numa_no_overlap_high_check(void)
2285 {
2286 int nid_req = 7;
2287 struct memblock_region *new_rgn = &memblock.reserved.regions[0];
2288 struct memblock_region *min_node = &memblock.memory.regions[2];
2289 struct memblock_region *max_node = &memblock.memory.regions[5];
2290 void *allocated_ptr = NULL;
2291 phys_addr_t size = SZ_64;
2292 phys_addr_t max_addr;
2293 phys_addr_t min_addr;
2294
2295 PREFIX_PUSH();
2296 setup_numa_memblock(node_fractions);
2297
2298 min_addr = min_node->base;
2299 max_addr = region_end(max_node);
2300
2301 allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
2302 min_addr, max_addr, nid_req);
2303
2304 ASSERT_NE(allocated_ptr, NULL);
2305 assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
2306
2307 ASSERT_EQ(new_rgn->size, size);
2308 ASSERT_EQ(new_rgn->base, min_addr);
2309 ASSERT_LE(region_end(new_rgn), region_end(min_node));
2310
2311 ASSERT_EQ(memblock.reserved.cnt, 1);
2312 ASSERT_EQ(memblock.reserved.total_size, size);
2313
2314 test_pass_pop();
2315
2316 return 0;
2317 }
2318
2319 /*
2320 * A test that tries to allocate a memory region in a specific NUMA node that
2321 * does not have enough memory to allocate a region of the requested size.
2322 * Additionally, none of the nodes have enough memory to allocate the region:
2323 *
2324 * +-----------------------------------+
2325 * | new |
2326 * +-----------------------------------+
2327 * |-------+-------+-------+-------+-------+-------+-------+-------|
2328 * | node0 | node1 | node2 | node3 | node4 | node5 | node6 | node7 |
2329 * +-------+-------+-------+-------+-------+-------+-------+-------+
2330 *
2331 * Expect no allocation to happen.
2332 */
2333 static int alloc_try_nid_numa_large_region_generic_check(void)
2334 {
2335 int nid_req = 3;
2336 void *allocated_ptr = NULL;
2337 phys_addr_t size = MEM_SIZE / SZ_2;
2338 phys_addr_t min_addr;
2339 phys_addr_t max_addr;
2340
2341 PREFIX_PUSH();
2342 setup_numa_memblock(node_fractions);
2343
2344 min_addr = memblock_start_of_DRAM();
2345 max_addr = memblock_end_of_DRAM();
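	/*
	 * Half of MEM_SIZE is larger than any single node (the largest node
	 * covers a quarter of memory), so neither the requested node nor any
	 * fallback node can hold the region and the call is expected to fail.
	 */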
2346
2347 allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
2348 min_addr, max_addr, nid_req);
2349 ASSERT_EQ(allocated_ptr, NULL);
2350
2351 test_pass_pop();
2352
2353 return 0;
2354 }
2355
2356 /*
2357 * A test that tries to allocate memory within min_addr and max_addr range when
2358 * there are two reserved regions at the borders. The requested node starts at
2359 * min_addr and ends at max_addr and is the same size as the region to be
2360 * allocated:
2361 *
2362 * min_addr
2363 * | max_addr
2364 * | |
2365 * v v
2366 * | +-----------+-----------------------+-----------------------|
2367 * | | node5 | requested | node7 |
2368 * +------+-----------+-----------------------+-----------------------+
2369 * + +
2370 * | +----+-----------------------+----+ |
2371 * | | r2 | new | r1 | |
2372 * +-------------+----+-----------------------+----+------------------+
2373 *
2374 * Expect to merge all of the regions into one. The region counter and total
2375 * size fields get updated.
2376 */
2377 static int alloc_try_nid_numa_reserved_full_merge_generic_check(void)
2378 {
2379 int nid_req = 6;
2380 int nid_next = nid_req + 1;
2381 struct memblock_region *new_rgn = &memblock.reserved.regions[0];
2382 struct memblock_region *req_node = &memblock.memory.regions[nid_req];
2383 struct memblock_region *next_node = &memblock.memory.regions[nid_next];
2384 void *allocated_ptr = NULL;
2385 struct region r1, r2;
2386 phys_addr_t size = req_node->size;
2387 phys_addr_t total_size;
2388 phys_addr_t max_addr;
2389 phys_addr_t min_addr;
2390
2391 PREFIX_PUSH();
2392 setup_numa_memblock(node_fractions);
2393
2394 r1.base = next_node->base;
2395 r1.size = SZ_128;
2396
2397 r2.size = SZ_128;
2398 r2.base = r1.base - (size + r2.size);
2399
2400 total_size = r1.size + r2.size + size;
2401 min_addr = r2.base + r2.size;
2402 max_addr = r1.base;
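	/*
	 * r2 ends exactly at the requested node's base and r1 starts exactly
	 * at its end, so the free window between them is exactly size bytes.
	 * The new allocation fills that window and memblock merges all three
	 * regions into one.
	 */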
2403
2404 memblock_reserve(r1.base, r1.size);
2405 memblock_reserve(r2.base, r2.size);
2406
2407 allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
2408 min_addr, max_addr, nid_req);
2409
2410 ASSERT_NE(allocated_ptr, NULL);
2411 assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
2412
2413 ASSERT_EQ(new_rgn->size, total_size);
2414 ASSERT_EQ(new_rgn->base, r2.base);
2415
2416 ASSERT_LE(new_rgn->base, req_node->base);
2417 ASSERT_LE(region_end(req_node), region_end(new_rgn));
2418
2419 ASSERT_EQ(memblock.reserved.cnt, 1);
2420 ASSERT_EQ(memblock.reserved.total_size, total_size);
2421
2422 test_pass_pop();
2423
2424 return 0;
2425 }
2426
2427 /*
2428  * A test that tries to allocate memory within min_addr and max_addr range,
2429 * where the total range can fit the region, but it is split between two nodes
2430 * and everything else is reserved. Additionally, nid is set to NUMA_NO_NODE
2431 * instead of requesting a specific node:
2432 *
2433 * +-----------+
2434 * | new |
2435 * +-----------+
2436 * | +---------------------+-----------|
2437 * | | prev node | next node |
2438 * +------+---------------------+-----------+
2439 * + +
2440 * |----------------------+ +-----|
2441 * | r1 | | r2 |
2442 * +----------------------+-----------+-----+
2443 * ^ ^
2444 * | |
2445 * | max_addr
2446 * |
2447 * min_addr
2448 *
2449 * Expect no allocation to happen.
2450 */
2451 static int alloc_try_nid_numa_split_all_reserved_generic_check(void)
2452 {
2453 void *allocated_ptr = NULL;
2454 struct memblock_region *next_node = &memblock.memory.regions[7];
2455 struct region r1, r2;
2456 phys_addr_t size = SZ_256;
2457 phys_addr_t max_addr;
2458 phys_addr_t min_addr;
2459
2460 PREFIX_PUSH();
2461 setup_numa_memblock(node_fractions);
2462
2463 r2.base = next_node->base + SZ_128;
2464 r2.size = memblock_end_of_DRAM() - r2.base;
2465
2466 r1.size = MEM_SIZE - (r2.size + size);
2467 r1.base = memblock_start_of_DRAM();
2468
2469 min_addr = r1.base + r1.size;
2470 max_addr = r2.base;
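	/*
	 * The only free window left is size bytes wide, but it straddles the
	 * boundary between the two nodes; memblock searches free ranges within
	 * each memory region separately, so neither piece is large enough and
	 * the allocation is expected to fail.
	 */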
2471
2472 memblock_reserve(r1.base, r1.size);
2473 memblock_reserve(r2.base, r2.size);
2474
2475 allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
2476 min_addr, max_addr,
2477 NUMA_NO_NODE);
2478
2479 ASSERT_EQ(allocated_ptr, NULL);
2480
2481 test_pass_pop();
2482
2483 return 0;
2484 }
2485
2486 /* Test case wrappers for NUMA tests */
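/*
 * Each wrapper below runs the top-down variant of a test with
 * memblock_set_bottom_up(false) and then the bottom-up variant with
 * memblock_set_bottom_up(true). Direction-agnostic tests are instead driven
 * through run_top_down() and run_bottom_up(), which presumably toggle the
 * direction and handle the prefix bookkeeping around a generic check.
 */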
2487 static int alloc_try_nid_numa_simple_check(void)
2488 {
2489 test_print("\tRunning %s...\n", __func__);
2490 memblock_set_bottom_up(false);
2491 alloc_try_nid_top_down_numa_simple_check();
2492 memblock_set_bottom_up(true);
2493 alloc_try_nid_bottom_up_numa_simple_check();
2494
2495 return 0;
2496 }
2497
2498 static int alloc_try_nid_numa_small_node_check(void)
2499 {
2500 test_print("\tRunning %s...\n", __func__);
2501 memblock_set_bottom_up(false);
2502 alloc_try_nid_top_down_numa_small_node_check();
2503 memblock_set_bottom_up(true);
2504 alloc_try_nid_bottom_up_numa_small_node_check();
2505
2506 return 0;
2507 }
2508
2509 static int alloc_try_nid_numa_node_reserved_check(void)
2510 {
2511 test_print("\tRunning %s...\n", __func__);
2512 memblock_set_bottom_up(false);
2513 alloc_try_nid_top_down_numa_node_reserved_check();
2514 memblock_set_bottom_up(true);
2515 alloc_try_nid_bottom_up_numa_node_reserved_check();
2516
2517 return 0;
2518 }
2519
2520 static int alloc_try_nid_numa_part_reserved_check(void)
2521 {
2522 test_print("\tRunning %s...\n", __func__);
2523 memblock_set_bottom_up(false);
2524 alloc_try_nid_top_down_numa_part_reserved_check();
2525 memblock_set_bottom_up(true);
2526 alloc_try_nid_bottom_up_numa_part_reserved_check();
2527
2528 return 0;
2529 }
2530
2531 static int alloc_try_nid_numa_part_reserved_fallback_check(void)
2532 {
2533 test_print("\tRunning %s...\n", __func__);
2534 memblock_set_bottom_up(false);
2535 alloc_try_nid_top_down_numa_part_reserved_fallback_check();
2536 memblock_set_bottom_up(true);
2537 alloc_try_nid_bottom_up_numa_part_reserved_fallback_check();
2538
2539 return 0;
2540 }
2541
2542 static int alloc_try_nid_numa_split_range_low_check(void)
2543 {
2544 test_print("\tRunning %s...\n", __func__);
2545 memblock_set_bottom_up(false);
2546 alloc_try_nid_top_down_numa_split_range_low_check();
2547 memblock_set_bottom_up(true);
2548 alloc_try_nid_bottom_up_numa_split_range_low_check();
2549
2550 return 0;
2551 }
2552
2553 static int alloc_try_nid_numa_split_range_high_check(void)
2554 {
2555 test_print("\tRunning %s...\n", __func__);
2556 memblock_set_bottom_up(false);
2557 alloc_try_nid_top_down_numa_split_range_high_check();
2558 memblock_set_bottom_up(true);
2559 alloc_try_nid_bottom_up_numa_split_range_high_check();
2560
2561 return 0;
2562 }
2563
2564 static int alloc_try_nid_numa_no_overlap_split_check(void)
2565 {
2566 test_print("\tRunning %s...\n", __func__);
2567 memblock_set_bottom_up(false);
2568 alloc_try_nid_top_down_numa_no_overlap_split_check();
2569 memblock_set_bottom_up(true);
2570 alloc_try_nid_bottom_up_numa_no_overlap_split_check();
2571
2572 return 0;
2573 }
2574
2575 static int alloc_try_nid_numa_no_overlap_low_check(void)
2576 {
2577 test_print("\tRunning %s...\n", __func__);
2578 memblock_set_bottom_up(false);
2579 alloc_try_nid_top_down_numa_no_overlap_low_check();
2580 memblock_set_bottom_up(true);
2581 alloc_try_nid_bottom_up_numa_no_overlap_low_check();
2582
2583 return 0;
2584 }
2585
2586 static int alloc_try_nid_numa_no_overlap_high_check(void)
2587 {
2588 test_print("\tRunning %s...\n", __func__);
2589 memblock_set_bottom_up(false);
2590 alloc_try_nid_top_down_numa_no_overlap_high_check();
2591 memblock_set_bottom_up(true);
2592 alloc_try_nid_bottom_up_numa_no_overlap_high_check();
2593
2594 return 0;
2595 }
2596
2597 static int alloc_try_nid_numa_large_region_check(void)
2598 {
2599 test_print("\tRunning %s...\n", __func__);
2600 run_top_down(alloc_try_nid_numa_large_region_generic_check);
2601 run_bottom_up(alloc_try_nid_numa_large_region_generic_check);
2602
2603 return 0;
2604 }
2605
2606 static int alloc_try_nid_numa_reserved_full_merge_check(void)
2607 {
2608 test_print("\tRunning %s...\n", __func__);
2609 run_top_down(alloc_try_nid_numa_reserved_full_merge_generic_check);
2610 run_bottom_up(alloc_try_nid_numa_reserved_full_merge_generic_check);
2611
2612 return 0;
2613 }
2614
2615 static int alloc_try_nid_numa_split_all_reserved_check(void)
2616 {
2617 test_print("\tRunning %s...\n", __func__);
2618 run_top_down(alloc_try_nid_numa_split_all_reserved_generic_check);
2619 run_bottom_up(alloc_try_nid_numa_split_all_reserved_generic_check);
2620
2621 return 0;
2622 }
2623
2624 int __memblock_alloc_nid_numa_checks(void)
2625 {
2626 test_print("Running %s NUMA tests...\n",
2627 get_memblock_alloc_try_nid_name(alloc_nid_test_flags));
2628
2629 alloc_try_nid_numa_simple_check();
2630 alloc_try_nid_numa_small_node_check();
2631 alloc_try_nid_numa_node_reserved_check();
2632 alloc_try_nid_numa_part_reserved_check();
2633 alloc_try_nid_numa_part_reserved_fallback_check();
2634 alloc_try_nid_numa_split_range_low_check();
2635 alloc_try_nid_numa_split_range_high_check();
2636
2637 alloc_try_nid_numa_no_overlap_split_check();
2638 alloc_try_nid_numa_no_overlap_low_check();
2639 alloc_try_nid_numa_no_overlap_high_check();
2640 alloc_try_nid_numa_large_region_check();
2641 alloc_try_nid_numa_reserved_full_merge_check();
2642 alloc_try_nid_numa_split_all_reserved_check();
2643
2644 return 0;
2645 }
2646
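/*
 * Run both the range and the NUMA test groups for one flags variant:
 * TEST_F_NONE exercises memblock_alloc_try_nid() and TEST_F_RAW exercises
 * memblock_alloc_try_nid_raw(), which does not zero the allocated memory.
 */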
2647 static int memblock_alloc_nid_checks_internal(int flags)
2648 {
2649 alloc_nid_test_flags = flags;
2650
2651 prefix_reset();
2652 prefix_push(get_memblock_alloc_try_nid_name(flags));
2653
2654 reset_memblock_attributes();
2655 dummy_physical_memory_init();
2656
2657 memblock_alloc_nid_range_checks();
2658 memblock_alloc_nid_numa_checks();
2659
2660 dummy_physical_memory_cleanup();
2661
2662 prefix_pop();
2663
2664 return 0;
2665 }
2666
2667 int memblock_alloc_nid_checks(void)
2668 {
2669 memblock_alloc_nid_checks_internal(TEST_F_NONE);
2670 memblock_alloc_nid_checks_internal(TEST_F_RAW);
2671
2672 return 0;
2673 }
2674