• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 #define TST_NO_DEFAULT_MAIN
2 
3 #include "config.h"
4 #include <sys/types.h>
5 #include <sys/mman.h>
6 #include <sys/mount.h>
7 #include <sys/stat.h>
8 #include <sys/wait.h>
9 #include <sys/param.h>
10 #include <errno.h>
11 #include <fcntl.h>
12 #if HAVE_NUMA_H
13 #include <numa.h>
14 #endif
15 #if HAVE_NUMAIF_H
16 #include <numaif.h>
17 #endif
18 #include <pthread.h>
19 #include <stdarg.h>
20 #include <stdio.h>
21 #include <string.h>
22 #include <stdlib.h>
23 #include <unistd.h>
24 
25 #include "mem.h"
26 #include "numa_helper.h"
27 
28 /* OOM */
29 
alloc_mem(long int length,int testcase)30 static int alloc_mem(long int length, int testcase)
31 {
32 	char *s;
33 	long i, pagesz = getpagesize();
34 	int loop = 10;
35 
36 	tst_res(TINFO, "thread (%lx), allocating %ld bytes.",
37 		(unsigned long) pthread_self(), length);
38 
39 	s = mmap(NULL, length, PROT_READ | PROT_WRITE,
40 		 MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
41 	if (s == MAP_FAILED)
42 		return errno;
43 
44 	if (testcase == MLOCK) {
45 		while (mlock(s, length) == -1 && loop > 0) {
46 			if (EAGAIN != errno)
47 				return errno;
48 			usleep(300000);
49 			loop--;
50 		}
51 	}
52 
53 #ifdef HAVE_DECL_MADV_MERGEABLE
54 	if (testcase == KSM && madvise(s, length, MADV_MERGEABLE) == -1)
55 		return errno;
56 #endif
57 	for (i = 0; i < length; i += pagesz)
58 		s[i] = '\a';
59 
60 	return 0;
61 }
62 
child_alloc_thread(void * args)63 static void *child_alloc_thread(void *args)
64 {
65 	int ret = 0;
66 
67 	/* keep allocating until there's an error */
68 	while (!ret)
69 		ret = alloc_mem(LENGTH, (long)args);
70 	exit(ret);
71 }
72 
/*
 * child_alloc - child process body that consumes memory until killed
 * @testcase: NORMAL, MLOCK or KSM, passed through to alloc_mem()
 * @lite: when non-zero, perform a single TESTMEM*2+MB allocation and
 *        exit with its result instead of spawning allocator threads
 * @threads: number of allocator threads to start in the non-lite case
 *
 * Never returns: the process exits from child_alloc_thread() on the
 * first allocation failure, exits with 1 on setup failure, or is
 * OOM-killed while sleeping below.
 */
static void child_alloc(int testcase, int lite, int threads)
{
	int i;
	pthread_t *th;

	if (lite) {
		int ret = alloc_mem(TESTMEM * 2 + MB, testcase);
		exit(ret);
	}

	th = malloc(sizeof(pthread_t) * threads);
	if (!th) {
		tst_res(TINFO | TERRNO, "malloc");
		goto out;
	}

	for (i = 0; i < threads; i++) {
		/* TEST() stores the pthread_create() result in TST_RET */
		TEST(pthread_create(&th[i], NULL, child_alloc_thread,
			(void *)((long)testcase)));
		if (TST_RET) {
			tst_res(TINFO | TRERRNO, "pthread_create");
			/*
			 * Keep going if thread other than first fails to
			 * spawn due to lack of resources.
			 */
			if (i == 0 || TST_RET != EAGAIN)
				goto out;
		}
	}

	/* wait for one of threads to exit whole process */
	while (1)
		sleep(1);
out:
	exit(1);
}
109 
110 /*
111  * oom - allocates memory according to specified testcase and checks
112  *       desired outcome (e.g. child killed, operation failed with ENOMEM)
113  * @testcase: selects how child allocates memory
114  *            valid choices are: NORMAL, MLOCK and KSM
115  * @lite: if non-zero, child makes only single TESTMEM+MB allocation
116  *        if zero, child keeps allocating memory until it gets killed
117  *        or some operation fails
118  * @retcode: expected return code of child process
119  *           if matches child ret code, this function reports PASS,
120  *           otherwise it reports FAIL
121  * @allow_sigkill: if zero and child is killed, this function reports FAIL
122  *                 if non-zero, then if child is killed by SIGKILL
123  *                 it is considered as PASS
124  */
oom(int testcase,int lite,int retcode,int allow_sigkill)125 void oom(int testcase, int lite, int retcode, int allow_sigkill)
126 {
127 	pid_t pid;
128 	int status, threads;
129 
130 	tst_enable_oom_protection(0);
131 
132 	switch (pid = SAFE_FORK()) {
133 	case 0:
134 		tst_disable_oom_protection(0);
135 		threads = MAX(1, tst_ncpus() - 1);
136 		child_alloc(testcase, lite, threads);
137 	default:
138 		break;
139 	}
140 
141 	tst_res(TINFO, "expected victim is %d.", pid);
142 	SAFE_WAITPID(-1, &status, 0);
143 
144 	if (WIFSIGNALED(status)) {
145 		if (allow_sigkill && WTERMSIG(status) == SIGKILL) {
146 			tst_res(TPASS, "victim signalled: (%d) %s",
147 				SIGKILL,
148 				tst_strsig(SIGKILL));
149 		} else {
150 			tst_res(TFAIL, "victim signalled: (%d) %s",
151 				WTERMSIG(status),
152 				tst_strsig(WTERMSIG(status)));
153 		}
154 	} else if (WIFEXITED(status)) {
155 		if (WEXITSTATUS(status) == retcode) {
156 			tst_res(TPASS, "victim retcode: (%d) %s",
157 				retcode, strerror(retcode));
158 		} else {
159 			tst_res(TFAIL, "victim unexpectedly ended with "
160 				"retcode: %d, expected: %d",
161 				WEXITSTATUS(status), retcode);
162 		}
163 	} else {
164 		tst_res(TFAIL, "victim unexpectedly ended");
165 	}
166 }
167 
168 #ifdef HAVE_NUMA_V2
/*
 * set_global_mempolicy - apply a NUMA memory policy to the whole process
 * @mempolicy: 0 (do nothing) or one of MPOL_BIND, MPOL_INTERLEAVE,
 *             MPOL_PREFERRED
 *
 * Returns silently (after a TINFO message) when the system does not have
 * enough NUMA nodes for the requested policy; TBROKs on real failures.
 */
static void set_global_mempolicy(int mempolicy)
{
	unsigned long nmask[MAXNODES / BITS_PER_LONG] = { 0 };
	int num_nodes, *nodes;
	int ret;

	if (mempolicy) {
		/* only nodes with both memory and CPUs are usable here */
		ret = get_allowed_nodes_arr(NH_MEMS|NH_CPUS, &num_nodes, &nodes);
		if (ret != 0)
			tst_brk(TBROK|TERRNO, "get_allowed_nodes_arr");
		if (num_nodes < 2) {
			tst_res(TINFO, "mempolicy need NUMA system support");
			free(nodes);
			return;
		}
		switch(mempolicy) {
		case MPOL_BIND:
			/* bind the second node */
			set_node(nmask, nodes[1]);
			break;
		case MPOL_INTERLEAVE:
		case MPOL_PREFERRED:
			if (num_nodes == 2) {
				tst_res(TINFO, "The mempolicy need "
					 "more than 2 numa nodes");
				free(nodes);
				return;
			} else {
				/* Using the 2nd,3rd node */
				set_node(nmask, nodes[1]);
				set_node(nmask, nodes[2]);
			}
			break;
		default:
			tst_brk(TBROK|TERRNO, "Bad mempolicy mode");
		}
		if (set_mempolicy(mempolicy, nmask, MAXNODES) == -1)
			tst_brk(TBROK|TERRNO, "set_mempolicy");
	}
}
209 #else
/* No-op stub used when NUMA v2 support is not available at build time. */
static void set_global_mempolicy(int mempolicy LTP_ATTRIBUTE_UNUSED) { }
211 #endif
212 
/*
 * testoom - run the OOM scenario for NORMAL, MLOCK and (optionally) KSM pages
 * @mempolicy: NUMA policy forwarded to set_global_mempolicy(); 0 for none
 * @lite: forwarded to oom(); selects the single bounded allocation mode
 * @retcode: expected child exit code, forwarded to oom()
 * @allow_sigkill: forwarded to oom(); tolerate a SIGKILLed victim
 */
void testoom(int mempolicy, int lite, int retcode, int allow_sigkill)
{
	int ksm_run_orig;

	set_global_mempolicy(mempolicy);

	tst_res(TINFO, "start normal OOM testing.");
	oom(NORMAL, lite, retcode, allow_sigkill);

	tst_res(TINFO, "start OOM testing for mlocked pages.");
	oom(MLOCK, lite, retcode, allow_sigkill);

	/*
	 * Skip oom(KSM) if lite == 1, since limit_in_bytes may vary from
	 * run to run, which isn't reliable for oom03 cgroup test.
	 */
	if (access(PATH_KSM, F_OK) == -1 || lite == 1) {
		tst_res(TINFO, "KSM is not configed or lite == 1, "
			 "skip OOM test for KSM pags");
	} else {
		tst_res(TINFO, "start OOM testing for KSM pages.");
		/* enable KSM for the test, then restore the previous state */
		SAFE_FILE_SCANF(PATH_KSM "run", "%d", &ksm_run_orig);
		SAFE_FILE_PRINTF(PATH_KSM "run", "1");
		oom(KSM, lite, retcode, allow_sigkill);
		SAFE_FILE_PRINTF(PATH_KSM "run", "%d", ksm_run_orig);
	}
}
240 
241 /* KSM */
242 
check(char * path,long int value)243 static void check(char *path, long int value)
244 {
245 	char fullpath[BUFSIZ];
246 	long actual_val;
247 
248 	snprintf(fullpath, BUFSIZ, PATH_KSM "%s", path);
249 	SAFE_FILE_SCANF(fullpath, "%ld", &actual_val);
250 
251 	if (actual_val != value)
252 		tst_res(TFAIL, "%s is not %ld but %ld.", path, value,
253 			actual_val);
254 	else
255 		tst_res(TPASS, "%s is %ld.", path, actual_val);
256 }
257 
/*
 * final_group_check - verify all KSM sysfs counters against expected values
 *
 * The parameters are the expected values of the correspondingly named
 * KSM sysfs attributes.  The KSM daemon is paused (run=0) while the
 * counters are sampled and restored afterwards; see the comment below.
 */
static void final_group_check(int run, int pages_shared, int pages_sharing,
			  int pages_volatile, int pages_unshared,
			  int sleep_millisecs, int pages_to_scan)
{
	int ksm_run_orig;

	tst_res(TINFO, "check!");
	check("run", run);

	/*
	 * Temporarily stop the KSM scan during the checks: during the
	 * KSM scan the rmap_items in the stale unstable tree of the
	 * old pass are removed from it and are later reinserted in
	 * the new unstable tree of the current pass. So if the checks
	 * run in the race window between removal and re-insertion, it
	 * can lead to unexpected false positives where page_volatile
	 * is elevated and page_unshared is recessed.
	 */
	SAFE_FILE_SCANF(PATH_KSM "run", "%d", &ksm_run_orig);
	SAFE_FILE_PRINTF(PATH_KSM "run", "0");

	check("pages_shared", pages_shared);
	check("pages_sharing", pages_sharing);
	check("pages_volatile", pages_volatile);
	check("pages_unshared", pages_unshared);
	check("sleep_millisecs", sleep_millisecs);
	check("pages_to_scan", pages_to_scan);

	SAFE_FILE_PRINTF(PATH_KSM "run", "%d", ksm_run_orig);
}
288 
ksm_group_check(int run,int pages_shared,int pages_sharing,int pages_volatile,int pages_unshared,int sleep_millisecs,int pages_to_scan)289 void ksm_group_check(int run, int pages_shared, int pages_sharing,
290 		     int pages_volatile, int pages_unshared,
291 		     int sleep_millisecs, int pages_to_scan)
292 {
293 	if (run != 1) {
294 		tst_res(TFAIL, "group_check run is not 1, %d.", run);
295 	} else {
296 		/* wait for ksm daemon to scan all mergeable pages. */
297 		wait_ksmd_full_scan();
298 	}
299 
300 	final_group_check(run, pages_shared, pages_sharing,
301 			  pages_volatile, pages_unshared,
302 			  sleep_millisecs, pages_to_scan);
303 }
304 
/*
 * verify - check that a 2-D memory area is filled with a single byte value
 * @memory: array of row pointers (rows are mapped separately by the caller)
 * @value: expected fill byte
 * @proc: child number, used only in log messages
 * @start, @end: row range [start, end) to verify
 * @start2, @end2: column range [start2, end2) within each row
 *
 * NOTE(review): the memcmp() fast path compares (end-start)*(end2-start2)
 * bytes starting at memory[start], which assumes the checked rows are
 * virtually contiguous when end - start > 1 — confirm before reusing
 * this helper with other memory layouts.
 */
static void verify(char **memory, char value, int proc,
		    int start, int end, int start2, int end2)
{
	int i, j;
	void *s = NULL;

	s = SAFE_MALLOC((end - start) * (end2 - start2));

	tst_res(TINFO, "child %d verifies memory content.", proc);
	memset(s, value, (end - start) * (end2 - start2));
	/* slow path: on mismatch, report every differing byte individually */
	if (memcmp(memory[start], s, (end - start) * (end2 - start2))
	    != 0)
		for (j = start; j < end; j++)
			for (i = start2; i < end2; i++)
				if (memory[j][i] != value)
					tst_res(TFAIL, "child %d has %c at "
						 "%d,%d,%d.",
						 proc, memory[j][i], proc,
						 j, i);
	free(s);
}
326 
/* TCONF the test when the kernel exposes no hugepage support in sysfs. */
void check_hugepage(void)
{
	if (access(PATH_HUGEPAGES, F_OK))
		tst_brk(TCONF, "Huge page is not supported.");
}
332 
/*
 * Describes one memset pass of a KSM child: the byte to fill memory
 * with and how many bytes of the allocation are expected to be
 * mergeable by KSM afterwards.
 */
struct ksm_merge_data {
	char data;
	unsigned int mergeable_size;
};
337 
/*
 * ksm_child_memset - fill a child's allocation for one merge pass
 * @child_num: child index, used only for logging
 * @size: total allocation size in MB
 * @total_unit: number of mmap'ed chunks the allocation is split into
 * @ksm_merge_data: fill byte and expected mergeable size for this pass
 * @memory: array of @total_unit chunk pointers, each size/total_unit MB
 *
 * Writes the fill byte over the whole area; when mergeable_size is less
 * than the full area, the very last byte is set to 'e' so one page
 * stays unmergeable.
 */
static void ksm_child_memset(int child_num, int size, int total_unit,
		 struct ksm_merge_data ksm_merge_data, char **memory)
{
	int i = 0, j;
	int unit = size / total_unit;

	tst_res(TINFO, "child %d continues...", child_num);

	if (ksm_merge_data.mergeable_size == size * MB) {
		tst_res(TINFO, "child %d allocates %d MB filled with '%c'",
			child_num, size, ksm_merge_data.data);

	} else {
		tst_res(TINFO, "child %d allocates %d MB filled with '%c'"
				" except one page with 'e'",
				child_num, size, ksm_merge_data.data);
	}

	for (j = 0; j < total_unit; j++) {
		for (i = 0; (unsigned int)i < unit * MB; i++)
			memory[j][i] = ksm_merge_data.data;
	}

	/* if it contains unshared page, then set 'e' char
	 * at the end of the last page
	 * (relies on j == total_unit and i == unit * MB after the loops)
	 */
	if (ksm_merge_data.mergeable_size < size * MB)
		memory[j-1][i-1] = 'e';
}
367 
create_ksm_child(int child_num,int size,int unit,struct ksm_merge_data * ksm_merge_data)368 static void create_ksm_child(int child_num, int size, int unit,
369 		       struct ksm_merge_data *ksm_merge_data)
370 {
371 	int j, total_unit;
372 	char **memory;
373 
374 	/* The total units in all */
375 	total_unit = size / unit;
376 
377 	/* Apply for the space for memory */
378 	memory = SAFE_MALLOC(total_unit * sizeof(char *));
379 	for (j = 0; j < total_unit; j++) {
380 		memory[j] = SAFE_MMAP(NULL, unit * MB, PROT_READ|PROT_WRITE,
381 			MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
382 #ifdef HAVE_DECL_MADV_MERGEABLE
383 		if (madvise(memory[j], unit * MB, MADV_MERGEABLE) == -1)
384 			tst_brk(TBROK|TERRNO, "madvise");
385 #endif
386 	}
387 
388 	tst_res(TINFO, "child %d stops.", child_num);
389 	if (raise(SIGSTOP) == -1)
390 		tst_brk(TBROK|TERRNO, "kill");
391 	fflush(stdout);
392 
393 	for (j = 0; j < 4; j++) {
394 
395 		ksm_child_memset(child_num, size, total_unit,
396 				  ksm_merge_data[j], memory);
397 
398 		fflush(stdout);
399 
400 		tst_res(TINFO, "child %d stops.", child_num);
401 		if (raise(SIGSTOP) == -1)
402 			tst_brk(TBROK|TERRNO, "kill");
403 
404 		if (ksm_merge_data[j].mergeable_size < size * MB) {
405 			verify(memory, 'e', child_num, total_unit - 1,
406 				total_unit, unit * MB - 1, unit * MB);
407 			verify(memory, ksm_merge_data[j].data, child_num,
408 				0, total_unit, 0, unit * MB - 1);
409 		} else {
410 			verify(memory, ksm_merge_data[j].data, child_num,
411 				0, total_unit, 0, unit * MB);
412 		}
413 	}
414 
415 	tst_res(TINFO, "child %d finished.", child_num);
416 }
417 
stop_ksm_children(int * child,int num)418 static void stop_ksm_children(int *child, int num)
419 {
420 	int k, status;
421 
422 	tst_res(TINFO, "wait for all children to stop.");
423 	for (k = 0; k < num; k++) {
424 		SAFE_WAITPID(child[k], &status, WUNTRACED);
425 		if (!WIFSTOPPED(status))
426 			tst_brk(TBROK, "child %d was not stopped", k);
427 	}
428 }
429 
resume_ksm_children(int * child,int num)430 static void resume_ksm_children(int *child, int num)
431 {
432 	int k;
433 
434 	tst_res(TINFO, "resume all children.");
435 	for (k = 0; k < num; k++)
436 		SAFE_KILL(child[k], SIGCONT);
437 
438 	fflush(stdout);
439 }
440 
create_same_memory(int size,int num,int unit)441 void create_same_memory(int size, int num, int unit)
442 {
443 	int i, j, status, *child;
444 	unsigned long ps, pages;
445 	struct ksm_merge_data **ksm_data;
446 
447 	struct ksm_merge_data ksm_data0[] = {
448 	       {'c', size*MB}, {'c', size*MB}, {'d', size*MB}, {'d', size*MB},
449 	};
450 	struct ksm_merge_data ksm_data1[] = {
451 	       {'a', size*MB}, {'b', size*MB}, {'d', size*MB}, {'d', size*MB-1},
452 	};
453 	struct ksm_merge_data ksm_data2[] = {
454 	       {'a', size*MB}, {'a', size*MB}, {'d', size*MB}, {'d', size*MB},
455 	};
456 
457 	ps = sysconf(_SC_PAGE_SIZE);
458 	pages = MB / ps;
459 
460 	ksm_data = malloc((num - 3) * sizeof(struct ksm_merge_data *));
461 	/* Since from third child, the data is same with the first child's */
462 	for (i = 0; i < num - 3; i++) {
463 		ksm_data[i] = malloc(4 * sizeof(struct ksm_merge_data));
464 		for (j = 0; j < 4; j++) {
465 			ksm_data[i][j].data = ksm_data0[j].data;
466 			ksm_data[i][j].mergeable_size =
467 				ksm_data0[j].mergeable_size;
468 		}
469 	}
470 
471 	child = SAFE_MALLOC(num * sizeof(int));
472 
473 	for (i = 0; i < num; i++) {
474 		fflush(stdout);
475 		switch (child[i] = SAFE_FORK()) {
476 		case 0:
477 			if (i == 0) {
478 				create_ksm_child(i, size, unit, ksm_data0);
479 				exit(0);
480 			} else if (i == 1) {
481 				create_ksm_child(i, size, unit, ksm_data1);
482 				exit(0);
483 			} else if (i == 2) {
484 				create_ksm_child(i, size, unit, ksm_data2);
485 				exit(0);
486 			} else {
487 				create_ksm_child(i, size, unit, ksm_data[i-3]);
488 				exit(0);
489 			}
490 		}
491 	}
492 
493 	stop_ksm_children(child, num);
494 
495 	tst_res(TINFO, "KSM merging...");
496 	if (access(PATH_KSM "max_page_sharing", F_OK) == 0) {
497 		SAFE_FILE_PRINTF(PATH_KSM "run", "2");
498 		SAFE_FILE_PRINTF(PATH_KSM "max_page_sharing", "%ld", size * pages * num);
499 	}
500 
501 	SAFE_FILE_PRINTF(PATH_KSM "run", "1");
502 	SAFE_FILE_PRINTF(PATH_KSM "pages_to_scan", "%ld", size * pages * num);
503 	SAFE_FILE_PRINTF(PATH_KSM "sleep_millisecs", "0");
504 
505 	resume_ksm_children(child, num);
506 	stop_ksm_children(child, num);
507 	ksm_group_check(1, 2, size * num * pages - 2, 0, 0, 0, size * pages * num);
508 
509 	resume_ksm_children(child, num);
510 	stop_ksm_children(child, num);
511 	ksm_group_check(1, 3, size * num * pages - 3, 0, 0, 0, size * pages * num);
512 
513 	resume_ksm_children(child, num);
514 	stop_ksm_children(child, num);
515 	ksm_group_check(1, 1, size * num * pages - 1, 0, 0, 0, size * pages * num);
516 
517 	resume_ksm_children(child, num);
518 	stop_ksm_children(child, num);
519 	ksm_group_check(1, 1, size * num * pages - 2, 0, 1, 0, size * pages * num);
520 
521 	tst_res(TINFO, "KSM unmerging...");
522 	SAFE_FILE_PRINTF(PATH_KSM "run", "2");
523 
524 	resume_ksm_children(child, num);
525 	final_group_check(2, 0, 0, 0, 0, 0, size * pages * num);
526 
527 	tst_res(TINFO, "stop KSM.");
528 	SAFE_FILE_PRINTF(PATH_KSM "run", "0");
529 	final_group_check(0, 0, 0, 0, 0, 0, size * pages * num);
530 
531 	while (waitpid(-1, &status, 0) > 0)
532 		if (WEXITSTATUS(status) != 0)
533 			tst_res(TFAIL, "child exit status is %d",
534 				 WEXITSTATUS(status));
535 }
536 
537 /* THP */
538 
539 /* cpuset/memcg */
gather_node_cpus(char * cpus,long nd)540 static void gather_node_cpus(char *cpus, long nd)
541 {
542 	int ncpus = 0;
543 	int i;
544 	long online;
545 	char buf[BUFSIZ];
546 	char path[BUFSIZ], path1[BUFSIZ];
547 
548 	while (path_exist(PATH_SYS_SYSTEM "/cpu/cpu%d", ncpus))
549 		ncpus++;
550 
551 	for (i = 0; i < ncpus; i++) {
552 		snprintf(path, BUFSIZ,
553 			 PATH_SYS_SYSTEM "/node/node%ld/cpu%d", nd, i);
554 		if (path_exist(path)) {
555 			snprintf(path1, BUFSIZ, "%s/online", path);
556 			/*
557 			 * if there is no online knob, then the cpu cannot
558 			 * be taken offline
559 			 */
560 			if (path_exist(path1)) {
561 				SAFE_FILE_SCANF(path1, "%ld", &online);
562 				if (online == 0)
563 					continue;
564 			}
565 			sprintf(buf, "%d,", i);
566 			strcat(cpus, buf);
567 		}
568 	}
569 	/* Remove the trailing comma. */
570 	cpus[strlen(cpus) - 1] = '\0';
571 }
572 
write_cpusets(const struct tst_cg_group * cg,long nd)573 void write_cpusets(const struct tst_cg_group *cg, long nd)
574 {
575 	char cpus[BUFSIZ] = "";
576 
577 	SAFE_CG_PRINTF(cg, "cpuset.mems", "%ld", nd);
578 
579 	gather_node_cpus(cpus, nd);
580 	/*
581 	 * If the 'nd' node doesn't contain any CPUs,
582 	 * the first ID of CPU '0' will be used as
583 	 * the value of cpuset.cpus.
584 	 */
585 	if (strlen(cpus) != 0) {
586 		SAFE_CG_PRINT(cg, "cpuset.cpus", cpus);
587 	} else {
588 		tst_res(TINFO, "No CPUs in the node%ld; "
589 				"using only CPU0", nd);
590 		SAFE_CG_PRINT(cg, "cpuset.cpus", "0");
591 	}
592 }
593 
594 /* shared */
595 
/* Warning: *DO NOT* use this function in child */
/*
 * get_a_numa_node - pick a NUMA node that has both memory and CPUs
 *
 * Returns the chosen node id.  TCONFs on non-NUMA systems and TBROKs
 * on other failures; since tst_brk() does not return, the switch
 * cases below fall through intentionally.
 */
unsigned int get_a_numa_node(void)
{
	unsigned int nd1, nd2;
	int ret;

	/* first make sure there are at least two allowed nodes at all */
	ret = get_allowed_nodes(0, 2, &nd1, &nd2);
	switch (ret) {
	case 0:
		break;
	case -3:
		tst_brk(TCONF, "requires a NUMA system.");
	default:
		tst_brk(TBROK | TERRNO, "1st get_allowed_nodes");
	}

	/* then pick one node that has both memory and CPUs */
	ret = get_allowed_nodes(NH_MEMS | NH_CPUS, 1, &nd1);
	switch (ret) {
	case 0:
		tst_res(TINFO, "get node%u.", nd1);
		return nd1;
	case -3:
		tst_brk(TCONF, "requires a NUMA system that has "
			 "at least one node with both memory and CPU "
			 "available.");
	default:
		tst_brk(TBROK | TERRNO, "2nd get_allowed_nodes");
	}

	/* not reached */
	abort();
}
628 
path_exist(const char * path,...)629 int path_exist(const char *path, ...)
630 {
631 	va_list ap;
632 	char pathbuf[PATH_MAX];
633 
634 	va_start(ap, path);
635 	vsnprintf(pathbuf, sizeof(pathbuf), path, ap);
636 	va_end(ap);
637 
638 	return access(pathbuf, F_OK) == 0;
639 }
640 
set_sys_tune(char * sys_file,long tune,int check)641 void set_sys_tune(char *sys_file, long tune, int check)
642 {
643 	long val;
644 	char path[BUFSIZ];
645 
646 	tst_res(TINFO, "set %s to %ld", sys_file, tune);
647 
648 	snprintf(path, BUFSIZ, PATH_SYSVM "%s", sys_file);
649 	SAFE_FILE_PRINTF(path, "%ld", tune);
650 
651 	if (check) {
652 		val = get_sys_tune(sys_file);
653 		if (val != tune)
654 			tst_brk(TBROK, "%s = %ld, but expect %ld",
655 				 sys_file, val, tune);
656 	}
657 }
658 
get_sys_tune(char * sys_file)659 long get_sys_tune(char *sys_file)
660 {
661 	char path[BUFSIZ];
662 	long tune;
663 
664 	snprintf(path, BUFSIZ, PATH_SYSVM "%s", sys_file);
665 	SAFE_FILE_SCANF(path, "%ld", &tune);
666 
667 	return tune;
668 }
669 
update_shm_size(size_t * shm_size)670 void update_shm_size(size_t * shm_size)
671 {
672 	size_t shmmax;
673 
674 	SAFE_FILE_SCANF(PATH_SHMMAX, "%zu", &shmmax);
675 	if (*shm_size > shmmax) {
676 		tst_res(TINFO, "Set shm_size to shmmax: %zu", shmmax);
677 		*shm_size = shmmax;
678 	}
679 }
680 
/*
 * range_is_mapped - check /proc/self/maps for a mapping inside [low, high)
 * @low: lower address bound
 * @high: upper address bound
 *
 * Returns 1 if any mapping's start or end address falls inside
 * [low, high), 0 otherwise.  TBROKs if /proc/self/maps can't be opened
 * or a line can't be parsed.
 */
int range_is_mapped(unsigned long low, unsigned long high)
{
	FILE *fp;

	fp = fopen("/proc/self/maps", "r");
	if (fp == NULL)
		tst_brk(TBROK | TERRNO, "Failed to open /proc/self/maps.");

	while (!feof(fp)) {
		unsigned long start, end;
		int ret;

		/* each maps line begins with "start-end perms ..." */
		ret = fscanf(fp, "%lx-%lx %*[^\n]\n", &start, &end);
		if (ret != 2) {
			fclose(fp);
			tst_brk(TBROK | TERRNO, "Couldn't parse /proc/self/maps line.");
		}

		if ((start >= low) && (start < high)) {
			fclose(fp);
			return 1;
		}
		if ((end >= low) && (end < high)) {
			fclose(fp);
			return 1;
		}
	}

	fclose(fp);
	return 0;
}
712