/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>

#include <errno.h>
#include <limits.h>
#include <sched.h>
#include <stdint.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

#include "utils.h"

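// Used by the clone tests below: writes 42 through its argument (so a parent
// sharing our address space can see it) and exits with status 123.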
static int child_fn(void* i_ptr) {
  *reinterpret_cast<int*>(i_ptr) = 42;
  return 123;
}

#if defined(__BIONIC__)
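// With CLONE_VM the child runs in our address space, so its write to |i| is
// visible here. No termination signal is passed to clone(), so waitpid() needs
// __WCLONE to find the child.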
TEST(sched, clone) {
  void* child_stack[1024];

  int i = 0;
  pid_t tid = clone(child_fn, &child_stack[1024], CLONE_VM, &i);

  int status;
  ASSERT_EQ(tid, TEMP_FAILURE_RETRY(waitpid(tid, &status, __WCLONE)));

  ASSERT_EQ(42, i);

  ASSERT_TRUE(WIFEXITED(status));
  ASSERT_EQ(123, WEXITSTATUS(status));
}
#else
// For glibc, any call to clone with CLONE_VM set will cause later pthread
// calls in the same process to misbehave.
// See https://sourceware.org/bugzilla/show_bug.cgi?id=10311 for more details.
TEST(sched, clone) {
  // In order to enumerate all possible tests for CTS, create an empty test.
  GTEST_SKIP() << "glibc is broken";
}
#endif

TEST(sched, clone_errno) {
  // Check that our hand-written clone assembler sets errno correctly on failure.
  uintptr_t fake_child_stack[16];
  errno = 0;
  // If CLONE_THREAD is set, CLONE_SIGHAND must be set too.
  ASSERT_EQ(-1, clone(child_fn, &fake_child_stack[16], CLONE_THREAD, nullptr));
  ASSERT_ERRNO(EINVAL);
}

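// Asking clone() to run a function without giving it a stack should fail with EINVAL.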
TEST(sched, clone_null_child_stack) {
  int i = 0;
  errno = 0;
  ASSERT_EQ(-1, clone(child_fn, nullptr, CLONE_VM, &i));
  ASSERT_ERRNO(EINVAL);
}

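// CPU_SET/CPU_ISSET on a fixed-size cpu_set_t; CPUs outside [0, CPU_SETSIZE)
// should be silently ignored.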
TEST(sched, cpu_set) {
  cpu_set_t set;

  CPU_ZERO(&set);
  CPU_SET(0, &set);
  CPU_SET(17, &set);
  for (int i = 0; i < CPU_SETSIZE; i++) {
    ASSERT_EQ(i == 0 || i == 17, CPU_ISSET(i, &set));
  }

  // We should fail silently if we try to set/test outside the range.
  CPU_SET(CPU_SETSIZE, &set);
  ASSERT_FALSE(CPU_ISSET(CPU_SETSIZE, &set));
}

TEST(sched, cpu_count) {
  cpu_set_t set;

  CPU_ZERO(&set);
  ASSERT_EQ(0, CPU_COUNT(&set));
  CPU_SET(2, &set);
  CPU_SET(10, &set);
  ASSERT_EQ(2, CPU_COUNT(&set));
  CPU_CLR(10, &set);
  ASSERT_EQ(1, CPU_COUNT(&set));
}

TEST(sched, cpu_zero) {
  cpu_set_t set;

  CPU_ZERO(&set);
  ASSERT_EQ(0, CPU_COUNT(&set));
  for (int i = 0; i < CPU_SETSIZE; i++) {
    ASSERT_FALSE(CPU_ISSET(i, &set));
  }
}

TEST(sched, cpu_clr) {
  cpu_set_t set;

  CPU_ZERO(&set);
  CPU_SET(0, &set);
  CPU_SET(1, &set);
  for (int i = 0; i < CPU_SETSIZE; i++) {
    ASSERT_EQ(i == 0 || i == 1, CPU_ISSET(i, &set));
  }
  CPU_CLR(1, &set);
  for (int i = 0; i < CPU_SETSIZE; i++) {
    ASSERT_EQ(i == 0, CPU_ISSET(i, &set));
  }

  // We should fail silently if we try to clear/test outside the range.
  CPU_CLR(CPU_SETSIZE, &set);
  ASSERT_FALSE(CPU_ISSET(CPU_SETSIZE, &set));
}

TEST(sched, cpu_equal) {
  cpu_set_t set1;
  cpu_set_t set2;

  CPU_ZERO(&set1);
  CPU_ZERO(&set2);
  CPU_SET(1, &set1);
  ASSERT_FALSE(CPU_EQUAL(&set1, &set2));
  CPU_SET(1, &set2);
  ASSERT_TRUE(CPU_EQUAL(&set1, &set2));
}

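// CPU_AND/CPU_OR/CPU_XOR combine two fixed-size sets into a destination set.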
TEST(sched, cpu_op) {
  cpu_set_t set1;
  cpu_set_t set2;
  cpu_set_t set3;

  CPU_ZERO(&set1);
  CPU_ZERO(&set2);
  CPU_ZERO(&set3);
  CPU_SET(0, &set1);
  CPU_SET(0, &set2);
  CPU_SET(1, &set2);

  CPU_AND(&set3, &set1, &set2);
  for (int i = 0; i < CPU_SETSIZE; i++) {
    ASSERT_EQ(i == 0, CPU_ISSET(i, &set3));
  }

  CPU_XOR(&set3, &set1, &set2);
  for (int i = 0; i < CPU_SETSIZE; i++) {
    ASSERT_EQ(i == 1, CPU_ISSET(i, &set3));
  }

  CPU_OR(&set3, &set1, &set2);
  for (int i = 0; i < CPU_SETSIZE; i++) {
    ASSERT_EQ(i == 0 || i == 1, CPU_ISSET(i, &set3));
  }
}

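// CPU_ALLOC/CPU_ALLOC_SIZE/CPU_FREE manage dynamically-sized sets; the *_S
// macros take the set's byte size explicitly.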
TEST(sched, cpu_alloc_small) {
  cpu_set_t* set = CPU_ALLOC(17);
  size_t size = CPU_ALLOC_SIZE(17);

  CPU_ZERO_S(size, set);
  ASSERT_EQ(0, CPU_COUNT_S(size, set));
  CPU_SET_S(16, size, set);
  ASSERT_TRUE(CPU_ISSET_S(16, size, set));

  CPU_FREE(set);
}

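// Unlike the fixed-size macros, a dynamically allocated set can index CPUs at
// or beyond CPU_SETSIZE.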
TEST(sched, cpu_alloc_big) {
  cpu_set_t* set = CPU_ALLOC(10 * CPU_SETSIZE);
  size_t size = CPU_ALLOC_SIZE(10 * CPU_SETSIZE);

  CPU_ZERO_S(size, set);
  ASSERT_EQ(0, CPU_COUNT_S(size, set));
  CPU_SET_S(CPU_SETSIZE, size, set);
  ASSERT_TRUE(CPU_ISSET_S(CPU_SETSIZE, size, set));

  CPU_FREE(set);
}

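// Exercise CPU_SET_S/CPU_CLR_S/CPU_ISSET_S/CPU_COUNT_S over every bit of a
// 64-CPU set.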
TEST(sched, cpu_s_macros) {
  int set_size = 64;
  size_t size = CPU_ALLOC_SIZE(set_size);
  cpu_set_t* set = CPU_ALLOC(set_size);

  CPU_ZERO_S(size, set);
  for (int i = 0; i < set_size; i++) {
    ASSERT_FALSE(CPU_ISSET_S(i, size, set));
    CPU_SET_S(i, size, set);
    ASSERT_TRUE(CPU_ISSET_S(i, size, set));
    ASSERT_EQ(i + 1, CPU_COUNT_S(size, set));
  }

  for (int i = 0; i < set_size; i++) {
    CPU_CLR_S(i, size, set);
    ASSERT_FALSE(CPU_ISSET_S(i, size, set));
    ASSERT_EQ(set_size - i - 1, CPU_COUNT_S(size, set));
  }

  CPU_FREE(set);
}

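// CPU_AND_S/CPU_OR_S/CPU_XOR_S combine differently-sized sets; the explicit
// size (size1 here, the smallest) bounds how many bytes are read and written.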
TEST(sched, cpu_op_s_macros) {
  int set_size1 = 64;
  int set_size2 = set_size1 * 2;
  int set_size3 = set_size1 * 3;
  size_t size1 = CPU_ALLOC_SIZE(set_size1);
  size_t size2 = CPU_ALLOC_SIZE(set_size2);
  size_t size3 = CPU_ALLOC_SIZE(set_size3);

  cpu_set_t* set1 = CPU_ALLOC(set_size1);
  cpu_set_t* set2 = CPU_ALLOC(set_size2);
  cpu_set_t* set3 = CPU_ALLOC(set_size3);
  CPU_ZERO_S(size1, set1);
  CPU_ZERO_S(size2, set2);
  CPU_ZERO_S(size3, set3);

  CPU_SET_S(0, size1, set1);
  CPU_SET_S(0, size2, set2);
  CPU_SET_S(1, size2, set2);

  CPU_AND_S(size1, set3, set1, set2);
  for (int i = 0; i < set_size3; i++) {
    ASSERT_EQ(i == 0, CPU_ISSET_S(i, size3, set3));
  }

  CPU_OR_S(size1, set3, set1, set2);
  for (int i = 0; i < set_size3; i++) {
    ASSERT_EQ(i == 0 || i == 1, CPU_ISSET_S(i, size3, set3));
  }

  CPU_XOR_S(size1, set3, set1, set2);
  for (int i = 0; i < set_size3; i++) {
    ASSERT_EQ(i == 1, CPU_ISSET_S(i, size3, set3));
  }

  CPU_FREE(set1);
  CPU_FREE(set2);
  CPU_FREE(set3);
}

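// CPU_EQUAL_S compares only the first |size| bytes of each set.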
TEST(sched, cpu_equal_s) {
  int set_size1 = 64;
  int set_size2 = set_size1 * 2;
  size_t size1 = CPU_ALLOC_SIZE(set_size1);
  size_t size2 = CPU_ALLOC_SIZE(set_size2);

  cpu_set_t* set1 = CPU_ALLOC(set_size1);
  cpu_set_t* set2 = CPU_ALLOC(set_size2);

  CPU_ZERO_S(size1, set1);
  CPU_ZERO_S(size2, set2);

  CPU_SET_S(0, size1, set1);
  ASSERT_TRUE(CPU_EQUAL_S(size1, set1, set1));
  ASSERT_FALSE(CPU_EQUAL_S(size1, set1, set2));
  CPU_SET_S(0, size2, set2);
  ASSERT_TRUE(CPU_EQUAL_S(size1, set1, set2));

  CPU_FREE(set1);
  CPU_FREE(set2);
}

TEST(sched, sched_get_priority_min_sched_get_priority_max) {
  EXPECT_LE(sched_get_priority_min(SCHED_BATCH), sched_get_priority_max(SCHED_BATCH));
  EXPECT_LE(sched_get_priority_min(SCHED_FIFO), sched_get_priority_max(SCHED_FIFO));
  EXPECT_LE(sched_get_priority_min(SCHED_IDLE), sched_get_priority_max(SCHED_IDLE));
  EXPECT_LE(sched_get_priority_min(SCHED_OTHER), sched_get_priority_max(SCHED_OTHER));
  EXPECT_LE(sched_get_priority_min(SCHED_RR), sched_get_priority_max(SCHED_RR));
}

TEST(sched, sched_getscheduler_sched_setscheduler) {
  // POSIX: "If pid is zero, the scheduling policy shall be returned for the
  // calling process".
  ASSERT_EQ(sched_getscheduler(getpid()), sched_getscheduler(0));

  const int original_policy = sched_getscheduler(getpid());
  sched_param p = {};
  p.sched_priority = sched_get_priority_min(original_policy);
  errno = 0;
  ASSERT_EQ(-1, sched_setscheduler(getpid(), INT_MAX, &p));
  ASSERT_ERRNO(EINVAL);

  ASSERT_EQ(0, sched_getparam(getpid(), &p));
  ASSERT_EQ(original_policy, sched_setscheduler(getpid(), SCHED_BATCH, &p));
  // POSIX says this should return the previous policy (here SCHED_BATCH),
  // but the Linux system call doesn't, and the glibc wrapper doesn't correct
  // this (the "returns 0" behavior is even documented on the man page in
  // the BUGS section). This was our historical behavior too, so in the
  // absence of reasons to break compatibility with ourselves and glibc, we
  // don't behave as POSIX specifies. http://b/26203902.
  ASSERT_EQ(0, sched_setscheduler(getpid(), original_policy, &p));
}

TEST(sched, sched_getaffinity_failure) {
  // Trivial test of the errno-preserving/returning behavior.
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wnonnull"
  ASSERT_EQ(-1, sched_getaffinity(getpid(), 0, nullptr));
  ASSERT_ERRNO(EINVAL);
#pragma clang diagnostic pop
}

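// Our affinity mask should contain at least one CPU.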
TEST(sched, sched_getaffinity) {
  cpu_set_t set;
  CPU_ZERO(&set);
  ASSERT_EQ(0, sched_getaffinity(getpid(), sizeof(set), &set));
  ASSERT_GT(CPU_COUNT(&set), 0);
}

TEST(sched, sched_setaffinity_failure) {
  // Trivial test of the errno-preserving/returning behavior.
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wnonnull"
  ASSERT_EQ(-1, sched_setaffinity(getpid(), 0, nullptr));
  ASSERT_ERRNO(EINVAL);
#pragma clang diagnostic pop
}

TEST(sched, sched_setaffinity) {
  cpu_set_t set;
  CPU_ZERO(&set);
  ASSERT_EQ(0, sched_getaffinity(getpid(), sizeof(set), &set));
  // It's hard to make any more general claim than this,
  // but it ought to be safe to ask for the same affinity you already have.
  ASSERT_EQ(0, sched_setaffinity(getpid(), sizeof(set), &set));
}

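// bionic wraps the sched_getattr()/sched_setattr() system calls; the glibc we
// build the host tests against is too old to have them, hence the skips.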
TEST(sched, sched_getattr) {
#if defined(__BIONIC__)
  struct sched_attr sa;
  ASSERT_EQ(0, sched_getattr(getpid(), &sa, sizeof(sa), 0));
#else
  GTEST_SKIP() << "our glibc is too old";
#endif
}

TEST(sched, sched_setattr_failure) {
#if defined(__BIONIC__)
  // Trivial test of the errno-preserving/returning behavior.
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wnonnull"
  ASSERT_EQ(-1, sched_setattr(getpid(), nullptr, 0));
  ASSERT_ERRNO(EINVAL);
#pragma clang diagnostic pop
#else
  GTEST_SKIP() << "our glibc is too old";
#endif
}