/*
 * SPDX-License-Identifier: GPL-2.0-or-later
 *
 * Copyright (c) 2019 Cyril Hrubis <chrubis@suse.cz>
 */

/*
 * We are testing mbind() MPOL_MF_MOVE and MPOL_MF_MOVE_ALL.
 *
 * If one of these flags is passed along with the policy, the kernel
 * attempts to move already faulted pages to match the requested policy.
 */

#include <errno.h>
#include "config.h"
#ifdef HAVE_NUMA_H
# include <numa.h>
# include <numaif.h>
# include "mbind.h"
#endif
#include "tst_test.h"
#include "tst_numa.h"

#ifdef HAVE_NUMA_V2

static size_t page_size;
static struct tst_nodemap *nodes;

static void setup(void)
{
	page_size = getpagesize();

	nodes = tst_get_nodemap(TST_NUMA_MEM, 2 * page_size / 1024);
	if (nodes->cnt <= 1)
		tst_brk(TCONF, "Test requires at least two NUMA memory nodes");
}

static void cleanup(void)
{
	tst_nodemap_free(nodes);
}

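/*
 * Maps and faults a single page, picks a node that currently holds none of
 * the faulted pages, then calls mbind() with the given policy and move flag
 * and verifies that the page has been migrated to the selected node.
 */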
static void verify_policy(int mode, unsigned flag)
{
	struct bitmask *bm = numa_allocate_nodemask();
	unsigned int i;
	void *ptr;
	unsigned long size = page_size;
	unsigned int node = 0;

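	/* Map a single page and fault it in so it gets allocated on some node. */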
	ptr = tst_numa_map(NULL, size);
	tst_nodemap_reset_counters(nodes);
	tst_numa_fault(ptr, size);
	tst_nodemap_count_pages(nodes, ptr, size);
	tst_nodemap_print_counters(nodes);
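
	/*
	 * Pick the first node that currently holds none of the faulted
	 * pages as the target of the move.
	 */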
	for (i = 0; i < nodes->cnt; i++) {
		if (!nodes->counters[i]) {
			node = nodes->map[i];
			tst_res(TINFO, "Attempting to move to node %i", node);
			numa_bitmask_setbit(bm, node);
			break;
		}
	}

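	/*
	 * With MPOL_MF_MOVE or MPOL_MF_MOVE_ALL mbind() does not only set the
	 * policy for future allocations but also attempts to migrate the
	 * already faulted page to a node that satisfies it.
	 */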
	TEST(mbind(ptr, size, mode, bm->maskp, bm->size + 1, flag));

	if (TST_RET) {
		tst_res(TFAIL | TTERRNO,
		        "mbind(%s, %s) node %u",
		        tst_numa_mode_name(mode), mbind_flag_name(flag), node);
		goto exit;
	} else {
		tst_res(TPASS, "mbind(%s, %s) node %u succeeded",
		        tst_numa_mode_name(mode), mbind_flag_name(flag), node);
	}

	tst_nodemap_reset_counters(nodes);
	tst_nodemap_count_pages(nodes, ptr, size);

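	/*
	 * After a successful move the single page is expected to reside on
	 * the selected node and on no other node.
	 */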
	for (i = 0; i < nodes->cnt; i++) {
		if (nodes->map[i] == node) {
			if (nodes->counters[i] == 1) {
				tst_res(TPASS, "Node %u allocated %u", node, 1);
			} else {
				tst_res(TFAIL, "Node %u allocated %u, expected %u",
				        node, nodes->counters[i], 1);
			}
			continue;
		}

		if (nodes->counters[i]) {
			tst_res(TFAIL, "Node %u allocated %u, expected 0",
			        nodes->map[i], nodes->counters[i]);
		}
	}

exit:
	tst_numa_unmap(ptr, size);
	numa_free_nodemask(bm);
}

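/* Policies to exercise; each one is tested with both move flags. */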
static const int modes[] = {
	MPOL_PREFERRED,
	MPOL_BIND,
	MPOL_INTERLEAVE,
};

static void verify_mbind(unsigned int n)
{
	verify_policy(modes[n], MPOL_MF_MOVE);
	verify_policy(modes[n], MPOL_MF_MOVE_ALL);
}

static struct tst_test test = {
	.setup = setup,
	.cleanup = cleanup,
	.test = verify_mbind,
	.tcnt = ARRAY_SIZE(modes),
	.needs_root = 1,
};

#else

TST_TEST_TCONF(NUMA_ERROR_MSG);

#endif /* HAVE_NUMA_V2 */