// SPDX-License-Identifier: LGPL-2.1-or-later
/*
 * Copyright (C) 2005-2006 David Gibson & Adam Litke, IBM Corporation.
 * Author: David Gibson & Adam Litke
 */
6 
/*\
 * [Description]
 *
 * Certain kernels have a bug where brk() does not perform the same
 * checks that a MAP_FIXED mmap() will, allowing brk() to create a
 * normal page VMA in a hugepage only address region. This can lead
 * to oopses or other badness.
 */
15 
16 #define _GNU_SOURCE
17 #include <stdio.h>
18 #include <sys/mount.h>
19 #include <limits.h>
20 #include <sys/param.h>
21 #include <sys/types.h>
22 
23 #include "hugetlb.h"
24 #include "tst_safe_stdio.h"
25 
26 #define MNTPOINT "hugetlbfs/"
27 static long hpage_size;
28 static int huge_fd = -1;
29 
30 #ifdef __powerpc64__
/*
 * Return non-zero when the MMU uses the Hash translation mode, which is
 * the mode that carves the address space into slices on ppc64.
 */
static int arch_has_slice_support(void)
{
	/*
	 * The "%16s" conversion stores up to 16 characters plus the
	 * terminating NUL, so the buffer must hold 17 bytes; a 16-byte
	 * buffer can be overrun by one byte.
	 */
	char mmu_type[17];

	SAFE_FILE_LINES_SCANF("/proc/cpuinfo", "MMU : %16s", mmu_type);
	return strcmp(mmu_type, "Hash") == 0;
}
38 
next_chunk(void * addr)39 static void *next_chunk(void *addr)
40 {
41 	if (!arch_has_slice_support())
42 		return PALIGN(addr, hpage_size);
43 
44 	if ((unsigned long)addr < 0x100000000UL)
45 		/* 256M segments below 4G */
46 		return PALIGN(addr, 0x10000000UL);
47 	/* 1TB segments above */
48 	return PALIGN(addr, 0x10000000000UL);
49 }
50 #elif defined(__powerpc__)
next_chunk(void * addr)51 static void *next_chunk(void *addr)
52 {
53 	if (tst_kernel_bits() == 32)
54 		return PALIGN(addr, hpage_size);
55 	else
56 		return PALIGN(addr, 0x10000000UL);
57 }
58 #elif defined(__ia64__)
/* On ia64, round addr up to the next address-space region boundary. */
static void *next_chunk(void *addr)
{
	const unsigned long region_size = 0x8000000000000000UL;

	return PALIGN(addr, region_size);
}
63 #else
next_chunk(void * addr)64 static void *next_chunk(void *addr)
65 {
66 	return PALIGN(addr, hpage_size);
67 }
68 #endif
69 
run_test(void)70 static void run_test(void)
71 {
72 	void *brk0, *hugemap_addr, *newbrk;
73 	char *p;
74 	int err;
75 
76 	brk0 = sbrk(0);
77 	tst_res(TINFO, "Initial break at %p", brk0);
78 
79 	hugemap_addr = next_chunk(brk0) + hpage_size;
80 
81 	p = SAFE_MMAP(hugemap_addr, hpage_size, PROT_READ|PROT_WRITE,
82 			MAP_PRIVATE|MAP_FIXED, huge_fd, 0);
83 	if (p != hugemap_addr) {
84 		tst_res(TFAIL, "mmap() at unexpected address %p instead of %p\n", p,
85 		     hugemap_addr);
86 		goto cleanup;
87 	}
88 
89 	newbrk = next_chunk(brk0) + getpagesize();
90 	err = brk((void *)newbrk);
91 	if (err == -1) {
92 		/* Failing the brk() is an acceptable kernel response */
93 		tst_res(TPASS, "Failing the brk at %p is an acceptable response",
94 				newbrk);
95 	} else {
96 		/* Suceeding the brk() is acceptable if the new memory is
97 		 * properly accesible and we don't have a kernel blow up when
98 		 * we touch it.
99 		 */
100 		tst_res(TINFO, "New break at %p", newbrk);
101 		memset(brk0, 0, newbrk-brk0);
102 		tst_res(TPASS, "memory is accessible, hence successful brk() is "
103 				"an acceptable response");
104 	}
105 cleanup:
106 	SAFE_MUNMAP(p, hpage_size);
107 	err = brk(brk0);
108 	if (err == -1)
109 		tst_brk(TBROK, "Failed to set break at the original position");
110 }
111 
setup(void)112 static void setup(void)
113 {
114 	hpage_size = SAFE_READ_MEMINFO(MEMINFO_HPAGE_SIZE)*1024;
115 	huge_fd = tst_creat_unlinked(MNTPOINT, 0);
116 }
117 
cleanup(void)118 static void cleanup(void)
119 {
120 	SAFE_CLOSE(huge_fd);
121 }
122 
123 static struct tst_test test = {
124 	.needs_root = 1,
125 	.mntpoint = MNTPOINT,
126 	.needs_hugetlbfs = 1,
127 	.taint_check = TST_TAINT_D | TST_TAINT_W,
128 	.setup = setup,
129 	.cleanup = cleanup,
130 	.test_all = run_test,
131 	.hugepages = {1, TST_NEEDS},
132 };
133