/*
 * Copyright (c) 2015-2017 Red Hat, Inc.
 *
 * This program is free software;  you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY;  without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU General Public License for more details.
 */

/*
 * DESCRIPTION
 *	shmget()/shmat() fails to allocate a huge page shared memory segment
 *	with EINVAL if its size is not in the range [ N*HUGE_PAGE_SIZE - 4095,
 *	N*HUGE_PAGE_SIZE ]. This is a problem in the memory segment size
 *	round-up algorithm. The requested size is rounded up to PAGE_SIZE
 *	(4096), but if this round-up does not match a HUGE_PAGE_SIZE (2MB)
 *	boundary, the allocation fails.
 *
 *	This bug is present in all RHEL6 versions, but not in RHEL7. It looks
 *	like this was fixed in the mainline kernel (> v3.3) by the following
 *	patches:
 *
 *	091d0d5 (shm: fix null pointer deref when userspace specifies
 *		 invalid hugepage size)
 *	af73e4d (hugetlbfs: fix mmap failure in unaligned size request)
 *	42d7395 (mm: support more pagesizes for MAP_HUGETLB/SHM_HUGETLB)
 *	40716e2 (hugetlbfs: fix alignment of huge page requests)
 *
 * AUTHORS
 *	Vladislav Dronov <vdronov@redhat.com>
 *	Li Wang <liwang@redhat.com>
 *
 */
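
/*
 * A worked example of the broken round-up described above, assuming 2MB
 * huge pages and a 4KB base page size: a request for
 * N * 2MB - 4096 = 8384512 bytes is already a multiple of PAGE_SIZE, so the
 * buggy round-up leaves it unchanged; since the result is not a multiple of
 * HUGE_PAGE_SIZE, the allocation is rejected with EINVAL. A fixed kernel
 * rounds the same request up to 4 * 2MB = 8388608 bytes and succeeds.
 */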

#include "hugetlb.h"

static long page_size;
static long hpage_size;
static long hugepages;

#define N 4

void setup(void)
{
	save_nr_hugepages();
	page_size = getpagesize();
	hpage_size = SAFE_READ_MEMINFO("Hugepagesize:") * 1024;

	hugepages = N + 1;
	set_sys_tune("nr_hugepages", hugepages, 1);
}

void cleanup(void)
{
	restore_nr_hugepages();
}

void shm_test(int size)
{
	int shmid;
	char *shmaddr;

	shmid = shmget(IPC_PRIVATE, size, 0600 | IPC_CREAT | SHM_HUGETLB);
	if (shmid < 0)
		tst_brk(TBROK | TERRNO, "shmget failed");

	shmaddr = shmat(shmid, 0, 0);
	if (shmaddr == (char *)-1) {
		shmctl(shmid, IPC_RMID, NULL);
		tst_brk(TFAIL | TERRNO,
			 "Bug: shared memory attach failure.");
	}

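	/* Write to the segment to check that the mapping is actually usable. */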
	shmaddr[0] = 1;
	tst_res(TINFO, "allocated %d huge bytes", size);

	if (shmdt((const void *)shmaddr) != 0) {
		shmctl(shmid, IPC_RMID, NULL);
		tst_brk(TFAIL | TERRNO, "Detach failure.");
	}

	shmctl(shmid, IPC_RMID, NULL);
}

static void test_hugeshmat(void)
{
	unsigned int i;

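	/*
	 * Request sizes around the huge page boundary. Apart from
	 * hpage_size itself, none of these rounds up to a whole number
	 * of huge pages when the size is only rounded to PAGE_SIZE,
	 * which is exactly the case the affected kernels rejected with
	 * EINVAL.
	 */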
	const int tst_sizes[] = {
		N * hpage_size - page_size,
		N * hpage_size - page_size - 1,
		hpage_size,
		hpage_size + 1
	};

	for (i = 0; i < ARRAY_SIZE(tst_sizes); ++i)
		shm_test(tst_sizes[i]);

	tst_res(TPASS, "No regression found.");
}

static struct tst_test test = {
	.needs_root = 1,
	.needs_tmpdir = 1,
	.test_all = test_hugeshmat,
	.setup = setup,
	.cleanup = cleanup,
};