// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2019 Cyril Hrubis <chrubis@suse.cz>
 */

/*
 * We are testing mbind() with MPOL_BIND, MPOL_PREFERRED and MPOL_INTERLEAVE.
 *
 * For each node with memory we set its bit in the nodemask passed to mbind()
 * and verify that memory has been faulted accordingly.
 */

#include <errno.h>
#include "config.h"
#ifdef HAVE_NUMA_H
# include <numa.h>
# include <numaif.h>
# include "mbind.h"
#endif
#include "tst_test.h"
#include "tst_numa.h"

#ifdef HAVE_NUMA_V2

static size_t page_size;
static struct tst_nodemap *nodes;

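/* Number of pages mapped and faulted for each mbind() call. */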
#define PAGES_ALLOCATED 16u

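/*
 * Collect the NUMA nodes that have memory available for the test and make
 * sure there are at least two of them, otherwise exit with TCONF.
 */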
static void setup(void)
{
	page_size = getpagesize();

	nodes = tst_get_nodemap(TST_NUMA_MEM, 2 * PAGES_ALLOCATED * page_size / 1024);
	if (nodes->cnt <= 1)
		tst_brk(TCONF, "Test requires at least two NUMA memory nodes");
}

static void cleanup(void)
{
	tst_nodemap_free(nodes);
}

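/*
 * Apply the given policy, with a nodemask containing a single node, to a
 * freshly mapped region, then fault the pages in both a forked child and the
 * parent and check that all of them were allocated on that node.
 */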
static void verify_policy(unsigned int node, int mode, unsigned flag)
{
	struct bitmask *bm = numa_allocate_nodemask();
	unsigned int i;
	void *ptr;
	pid_t pid;
	unsigned long size = PAGES_ALLOCATED * page_size;

	numa_bitmask_setbit(bm, node);

	ptr = tst_numa_map(NULL, size);

	TEST(mbind(ptr, size, mode, bm->maskp, bm->size + 1, flag));

	numa_free_nodemask(bm);

	if (TST_RET) {
		tst_res(TFAIL | TTERRNO,
		        "mbind(%s, %s) node %u",
		        tst_mempolicy_mode_name(mode), mbind_flag_name(flag), node);
		return;
	}

	tst_res(TPASS, "mbind(%s, %s) node %u",
	        tst_mempolicy_mode_name(mode), mbind_flag_name(flag), node);

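	/*
	 * Fork so that the pages are faulted first in the child, which
	 * inherits the mapping and its policy, and afterwards in the parent.
	 */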
	const char *prefix = "child: ";

	pid = SAFE_FORK();
	if (pid) {
		prefix = "parent: ";
		tst_reap_children();
	}

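	/* Fault every page in the mapping and count on which nodes they landed. */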
	tst_nodemap_reset_counters(nodes);
	tst_numa_fault(ptr, size);
	tst_nodemap_count_pages(nodes, ptr, size);
	tst_numa_unmap(ptr, size);

	int fail = 0;

	for (i = 0; i < nodes->cnt; i++) {
		if (nodes->map[i] == node) {
			if (nodes->counters[i] == PAGES_ALLOCATED) {
				tst_res(TPASS, "%sNode %u allocated %u",
				        prefix, node, PAGES_ALLOCATED);
			} else {
				tst_res(TFAIL, "%sNode %u allocated %u, expected %u",
				        prefix, node, nodes->counters[i],
				        PAGES_ALLOCATED);
				fail = 1;
			}
			continue;
		}

		if (nodes->counters[i]) {
			tst_res(TFAIL, "%sNode %u allocated %u, expected 0",
			        prefix, nodes->map[i], nodes->counters[i]);
			fail = 1;
		}
	}

	if (fail)
		tst_nodemap_print_counters(nodes);

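	/* The child exits here; only the parent returns to run further checks. */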
	if (!pid)
		exit(0);
}

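/* Memory policies exercised by the test, one per test iteration. */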
static const int modes[] = {
	MPOL_PREFERRED,
	MPOL_BIND,
	MPOL_INTERLEAVE,
};

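/*
 * For the n-th policy, run verify_policy() against every node with memory,
 * both with no flags and with MPOL_MF_STRICT.
 */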
static void verify_mbind(unsigned int n)
{
	unsigned int i;

	for (i = 0; i < nodes->cnt; i++) {
		verify_policy(nodes->map[i], modes[n], 0);
		verify_policy(nodes->map[i], modes[n], MPOL_MF_STRICT);
	}
}

static struct tst_test test = {
	.setup = setup,
	.cleanup = cleanup,
	.test = verify_mbind,
	.tcnt = ARRAY_SIZE(modes),
	.forks_child = 1,
};

#else

TST_TEST_TCONF(NUMA_ERROR_MSG);

#endif /* HAVE_NUMA_V2 */