1 /*
2 * Copyright (C) 2013 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <gtest/gtest.h>
18
19 #include <elf.h>
20 #include <limits.h>
21 #include <malloc.h>
22 #include <pthread.h>
23 #include <semaphore.h>
24 #include <signal.h>
25 #include <stdint.h>
26 #include <stdio.h>
27 #include <stdlib.h>
28 #include <string.h>
29 #include <sys/auxv.h>
30 #include <sys/cdefs.h>
31 #include <sys/prctl.h>
32 #include <sys/types.h>
33 #include <sys/wait.h>
34 #include <unistd.h>
35
36 #include <algorithm>
37 #include <atomic>
38 #include <functional>
39 #include <string>
40 #include <thread>
41 #include <unordered_map>
42 #include <utility>
43 #include <vector>
44
45 #include <tinyxml2.h>
46
47 #include <android-base/file.h>
48 #include <android-base/test_utils.h>
49
50 #include "utils.h"
51
52 #if defined(__BIONIC__)
53
54 #include "SignalUtils.h"
55 #include "dlext_private.h"
56
57 #include "platform/bionic/malloc.h"
58 #include "platform/bionic/mte.h"
59 #include "platform/bionic/reserved_signals.h"
60 #include "private/bionic_config.h"
61
62 #define HAVE_REALLOCARRAY 1
63
64 #elif defined(__GLIBC__)
65
66 #define HAVE_REALLOCARRAY __GLIBC_PREREQ(2, 26)
67
68 #elif defined(ANDROID_HOST_MUSL)
69
70 #define HAVE_REALLOCARRAY 1
71
72 #endif
73
TEST(malloc, malloc_std) {
  // Basic sanity check: a small allocation succeeds and the reported
  // usable size covers the requested length.
  void* allocation = malloc(100);
  ASSERT_NE(nullptr, allocation);
  ASSERT_LE(100U, malloc_usable_size(allocation));
  free(allocation);
}
81
TEST(malloc, malloc_overflow) {
  SKIP_WITH_HWASAN;
  // A SIZE_MAX request can never be satisfied: expect nullptr with ENOMEM.
  errno = 0;
  void* result = malloc(SIZE_MAX);
  ASSERT_EQ(nullptr, result);
  ASSERT_EQ(ENOMEM, errno);
}
88
TEST(malloc, calloc_std) {
  // calloc must return usable memory that is fully zeroed.
  constexpr size_t kAllocLen = 100;
  char* mem = static_cast<char*>(calloc(1, kAllocLen));
  ASSERT_NE(nullptr, mem);
  ASSERT_LE(kAllocLen, malloc_usable_size(mem));
  for (size_t idx = 0; idx < kAllocLen; idx++) {
    ASSERT_EQ(0, mem[idx]);
  }
  free(mem);
}
100
TEST(malloc, calloc_mem_init_disabled) {
#if defined(__BIONIC__)
  // calloc should still zero memory if mem-init is disabled.
  // With jemalloc the mallopts will fail but that shouldn't affect the
  // execution of the test.
  mallopt(M_THREAD_DISABLE_MEM_INIT, 1);
  size_t alloc_len = 100;
  char *ptr = reinterpret_cast<char*>(calloc(1, alloc_len));
  // Fix: verify the allocation succeeded before reading through the pointer;
  // the original dereferenced a potentially null result.
  ASSERT_TRUE(ptr != nullptr);
  for (size_t i = 0; i < alloc_len; i++) {
    ASSERT_EQ(0, ptr[i]);
  }
  free(ptr);
  mallopt(M_THREAD_DISABLE_MEM_INIT, 0);
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
118
TEST(malloc, calloc_illegal) {
  SKIP_WITH_HWASAN;
  // An nmemb of (size_t)-1 must be rejected with ENOMEM.
  errno = 0;
  void* result = calloc(-1, 100);
  ASSERT_EQ(nullptr, result);
  ASSERT_EQ(ENOMEM, errno);
}
125
TEST(malloc, calloc_overflow) {
  SKIP_WITH_HWASAN;
  // Every nmemb * size combination that overflows must fail with ENOMEM.
  const std::pair<size_t, size_t> overflow_args[] = {
      {1, SIZE_MAX},
      {SIZE_MAX, SIZE_MAX},
      {2, SIZE_MAX},
      {SIZE_MAX, 2},
  };
  for (const auto& arg : overflow_args) {
    errno = 0;
    ASSERT_EQ(nullptr, calloc(arg.first, arg.second));
    ASSERT_EQ(ENOMEM, errno);
  }
}
141
TEST(malloc, memalign_multiple) {
  SKIP_WITH_HWASAN << "hwasan requires power of 2 alignment";
  // Memalign test where the alignment is any value.
  for (size_t i = 0; i <= 12; i++) {
    for (size_t alignment = 1 << i; alignment < (1U << (i+1)); alignment++) {
      char *ptr = reinterpret_cast<char*>(memalign(alignment, 100));
      ASSERT_TRUE(ptr != nullptr) << "Failed at alignment " << alignment;
      ASSERT_LE(100U, malloc_usable_size(ptr)) << "Failed at alignment " << alignment;
      // For a non-power-of-two alignment, only alignment to the largest
      // power of two not above it (1 << i) is checked here.
      ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(ptr) % ((1U << i)))
          << "Failed at alignment " << alignment;
      free(ptr);
    }
  }
}
156
TEST(malloc, memalign_overflow) {
  SKIP_WITH_HWASAN;
  // An impossibly large aligned request must fail cleanly.
  void* result = memalign(4096, SIZE_MAX);
  ASSERT_EQ(nullptr, result);
}
161
TEST(malloc, memalign_non_power2) {
  SKIP_WITH_HWASAN;
  // Unlike posix_memalign, memalign accepts any alignment value,
  // including zero and non-powers-of-two.
  for (size_t align = 0; align <= 256; align++) {
    void* allocation = memalign(align, 1024);
    ASSERT_TRUE(allocation != nullptr) << "Failed at align " << align;
    free(allocation);
  }
}
171
TEST(malloc, memalign_realloc) {
  // Memalign and then realloc the pointer a couple of times: grow to 200
  // and 300 bytes, then shrink to 250, verifying after each resize that the
  // previously written fill pattern survived.
  // Fixes vs. original: removed a duplicated null assertion after the first
  // realloc, replaced C-style casts with named casts, and used uintptr_t
  // (not signed intptr_t) for the alignment modulo.
  for (size_t alignment = 1; alignment <= 4096; alignment <<= 1) {
    char* ptr = reinterpret_cast<char*>(memalign(alignment, 100));
    ASSERT_TRUE(ptr != nullptr);
    ASSERT_LE(100U, malloc_usable_size(ptr));
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(ptr) % alignment);
    memset(ptr, 0x23, 100);

    ptr = reinterpret_cast<char*>(realloc(ptr, 200));
    ASSERT_TRUE(ptr != nullptr);
    ASSERT_LE(200U, malloc_usable_size(ptr));
    for (size_t i = 0; i < 100; i++) {
      ASSERT_EQ(0x23, ptr[i]);
    }
    memset(ptr, 0x45, 200);

    ptr = reinterpret_cast<char*>(realloc(ptr, 300));
    ASSERT_TRUE(ptr != nullptr);
    ASSERT_LE(300U, malloc_usable_size(ptr));
    for (size_t i = 0; i < 200; i++) {
      ASSERT_EQ(0x45, ptr[i]);
    }
    memset(ptr, 0x67, 300);

    // Shrinking must preserve the surviving 250 bytes.
    ptr = reinterpret_cast<char*>(realloc(ptr, 250));
    ASSERT_TRUE(ptr != nullptr);
    ASSERT_LE(250U, malloc_usable_size(ptr));
    for (size_t i = 0; i < 250; i++) {
      ASSERT_EQ(0x67, ptr[i]);
    }
    free(ptr);
  }
}
207
TEST(malloc, malloc_realloc_larger) {
  // Realloc to a larger size, malloc is used for the original allocation.
  char* ptr = static_cast<char*>(malloc(100));
  ASSERT_NE(nullptr, ptr);
  ASSERT_LE(100U, malloc_usable_size(ptr));
  memset(ptr, 67, 100);

  // Growing must preserve the original 100 bytes of content.
  ptr = static_cast<char*>(realloc(ptr, 200));
  ASSERT_NE(nullptr, ptr);
  ASSERT_LE(200U, malloc_usable_size(ptr));
  for (size_t idx = 0; idx < 100; idx++) {
    ASSERT_EQ(67, ptr[idx]);
  }
  free(ptr);
}
223
TEST(malloc, malloc_realloc_smaller) {
  // Realloc to a smaller size, malloc is used for the original allocation.
  char* ptr = static_cast<char*>(malloc(200));
  ASSERT_NE(nullptr, ptr);
  ASSERT_LE(200U, malloc_usable_size(ptr));
  memset(ptr, 67, 200);

  // Shrinking must preserve the first 100 bytes of content.
  ptr = static_cast<char*>(realloc(ptr, 100));
  ASSERT_NE(nullptr, ptr);
  ASSERT_LE(100U, malloc_usable_size(ptr));
  for (size_t idx = 0; idx < 100; idx++) {
    ASSERT_EQ(67, ptr[idx]);
  }
  free(ptr);
}
239
TEST(malloc, malloc_multiple_realloc) {
  // Multiple reallocs, malloc is used for the original allocation.
  // Sequence: 200 -> 100 -> 50 -> 150 -> 425 bytes, checking after every
  // resize that the surviving prefix keeps its 0x23 fill pattern.
  char *ptr = (char *)malloc(200);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(200U, malloc_usable_size(ptr));
  memset(ptr, 0x23, 200);

  // Shrink: the first 100 bytes must be preserved.
  ptr = (char *)realloc(ptr, 100);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(100U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 100; i++) {
    ASSERT_EQ(0x23, ptr[i]);
  }

  // Shrink again to 50 bytes.
  ptr = (char*)realloc(ptr, 50);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(50U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 50; i++) {
    ASSERT_EQ(0x23, ptr[i]);
  }

  // Grow: only the 50 surviving bytes are guaranteed to hold the pattern.
  ptr = (char*)realloc(ptr, 150);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(150U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 50; i++) {
    ASSERT_EQ(0x23, ptr[i]);
  }
  memset(ptr, 0x23, 150);

  // Grow once more and verify the refreshed 150-byte pattern survives.
  ptr = (char*)realloc(ptr, 425);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(425U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 150; i++) {
    ASSERT_EQ(0x23, ptr[i]);
  }
  free(ptr);
}
277
TEST(malloc, calloc_realloc_larger) {
  // Realloc to a larger size, calloc is used for the original allocation.
  char* ptr = static_cast<char*>(calloc(1, 100));
  ASSERT_NE(nullptr, ptr);
  ASSERT_LE(100U, malloc_usable_size(ptr));

  // Growing must preserve the zeroed original 100 bytes.
  ptr = static_cast<char*>(realloc(ptr, 200));
  ASSERT_NE(nullptr, ptr);
  ASSERT_LE(200U, malloc_usable_size(ptr));
  for (size_t idx = 0; idx < 100; idx++) {
    ASSERT_EQ(0, ptr[idx]);
  }
  free(ptr);
}
292
TEST(malloc, calloc_realloc_smaller) {
  // Realloc to a smaller size, calloc is used for the original allocation.
  char* ptr = static_cast<char*>(calloc(1, 200));
  ASSERT_NE(nullptr, ptr);
  ASSERT_LE(200U, malloc_usable_size(ptr));

  // Shrinking must preserve the zeroed first 100 bytes.
  ptr = static_cast<char*>(realloc(ptr, 100));
  ASSERT_NE(nullptr, ptr);
  ASSERT_LE(100U, malloc_usable_size(ptr));
  for (size_t idx = 0; idx < 100; idx++) {
    ASSERT_EQ(0, ptr[idx]);
  }
  free(ptr);
}
307
TEST(malloc, calloc_multiple_realloc) {
  // Multiple reallocs, calloc is used for the original allocation.
  // Sequence: 200 -> 100 -> 50 -> 150 -> 425 bytes, checking after every
  // resize that the surviving prefix is still zero.
  char *ptr = (char *)calloc(1, 200);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(200U, malloc_usable_size(ptr));

  // Shrink: the first 100 bytes must remain zero.
  ptr = (char *)realloc(ptr, 100);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(100U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 100; i++) {
    ASSERT_EQ(0, ptr[i]);
  }

  // Shrink again to 50 bytes.
  ptr = (char*)realloc(ptr, 50);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(50U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 50; i++) {
    ASSERT_EQ(0, ptr[i]);
  }

  // Grow: only the 50 surviving bytes are guaranteed to be zero.
  ptr = (char*)realloc(ptr, 150);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(150U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 50; i++) {
    ASSERT_EQ(0, ptr[i]);
  }
  memset(ptr, 0, 150);

  // Grow once more and verify the re-zeroed 150 bytes survive.
  ptr = (char*)realloc(ptr, 425);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(425U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 150; i++) {
    ASSERT_EQ(0, ptr[i]);
  }
  free(ptr);
}
344
TEST(malloc, realloc_overflow) {
  SKIP_WITH_HWASAN;
  // Growing from nullptr (the malloc path) to SIZE_MAX must fail with ENOMEM.
  errno = 0;
  ASSERT_EQ(nullptr, realloc(nullptr, SIZE_MAX));
  ASSERT_EQ(ENOMEM, errno);
  // Growing an existing allocation to SIZE_MAX must fail the same way,
  // leaving the original block valid and freeable.
  void* original = malloc(100);
  ASSERT_TRUE(original != nullptr);
  errno = 0;
  ASSERT_EQ(nullptr, realloc(original, SIZE_MAX));
  ASSERT_EQ(ENOMEM, errno);
  free(original);
}
357
#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
// pvalloc/valloc are deprecated and may not be declared by the headers,
// so declare them manually for the tests below.
extern "C" void* pvalloc(size_t);
extern "C" void* valloc(size_t);
#endif
362
TEST(malloc, pvalloc_std) {
#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
  // pvalloc rounds the request up to a whole page and page-aligns the result.
  size_t pagesize = sysconf(_SC_PAGESIZE);
  void* allocation = pvalloc(100);
  ASSERT_TRUE(allocation != nullptr);
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(allocation) & (pagesize - 1));
  ASSERT_LE(pagesize, malloc_usable_size(allocation));
  free(allocation);
#else
  GTEST_SKIP() << "pvalloc not supported.";
#endif
}
375
TEST(malloc, pvalloc_overflow) {
#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
  // Rounding SIZE_MAX up to a page boundary overflows, so pvalloc must fail.
  void* result = pvalloc(SIZE_MAX);
  ASSERT_EQ(nullptr, result);
#else
  GTEST_SKIP() << "pvalloc not supported.";
#endif
}
383
TEST(malloc, valloc_std) {
#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
  // valloc returns page-aligned memory (but, unlike pvalloc, does not
  // round the size up to a page).
  size_t pagesize = sysconf(_SC_PAGESIZE);
  void* allocation = valloc(100);
  ASSERT_TRUE(allocation != nullptr);
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(allocation) & (pagesize - 1));
  free(allocation);
#else
  GTEST_SKIP() << "valloc not supported.";
#endif
}
395
TEST(malloc, valloc_overflow) {
#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
  // A SIZE_MAX page-aligned request can never succeed.
  void* result = valloc(SIZE_MAX);
  ASSERT_EQ(nullptr, result);
#else
  GTEST_SKIP() << "valloc not supported.";
#endif
}
403
TEST(malloc, malloc_info) {
#ifdef __BIONIC__
  SKIP_WITH_HWASAN; // hwasan does not implement malloc_info

  // Write malloc_info's XML output to a temporary file, then parse it back
  // and verify the document has the expected shape for the allocator in use.
  TemporaryFile tf;
  ASSERT_TRUE(tf.fd != -1);
  FILE* fp = fdopen(tf.fd, "w+");
  // fdopen took ownership of the fd; fclose below will close it.
  tf.release();
  ASSERT_TRUE(fp != nullptr);
  ASSERT_EQ(0, malloc_info(0, fp));
  ASSERT_EQ(0, fclose(fp));

  std::string contents;
  ASSERT_TRUE(android::base::ReadFileToString(tf.path, &contents));

  tinyxml2::XMLDocument doc;
  ASSERT_EQ(tinyxml2::XML_SUCCESS, doc.Parse(contents.c_str()));

  // The root element is <malloc version="...">; the version attribute
  // identifies which allocator produced the output.
  auto root = doc.FirstChildElement();
  ASSERT_NE(nullptr, root);
  ASSERT_STREQ("malloc", root->Name());
  std::string version(root->Attribute("version"));
  if (version == "jemalloc-1") {
    // jemalloc layout: one <heap nr=...> per arena, each containing
    // allocation totals and a list of <bin> elements.
    auto arena = root->FirstChildElement();
    for (; arena != nullptr; arena = arena->NextSiblingElement()) {
      int val;

      ASSERT_STREQ("heap", arena->Name());
      ASSERT_EQ(tinyxml2::XML_SUCCESS, arena->QueryIntAttribute("nr", &val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-large")->QueryIntText(&val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-huge")->QueryIntText(&val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-bins")->QueryIntText(&val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("bins-total")->QueryIntText(&val));

      auto bin = arena->FirstChildElement("bin");
      for (; bin != nullptr; bin = bin ->NextSiblingElement()) {
        // Siblings may include non-<bin> elements; only validate <bin>s.
        if (strcmp(bin->Name(), "bin") == 0) {
          ASSERT_EQ(tinyxml2::XML_SUCCESS, bin->QueryIntAttribute("nr", &val));
          ASSERT_EQ(tinyxml2::XML_SUCCESS,
                    bin->FirstChildElement("allocated")->QueryIntText(&val));
          ASSERT_EQ(tinyxml2::XML_SUCCESS,
                    bin->FirstChildElement("nmalloc")->QueryIntText(&val));
          ASSERT_EQ(tinyxml2::XML_SUCCESS,
                    bin->FirstChildElement("ndalloc")->QueryIntText(&val));
        }
      }
    }
  } else if (version == "scudo-1") {
    // scudo layout: a flat list of <alloc size=... count=...> elements.
    auto element = root->FirstChildElement();
    for (; element != nullptr; element = element->NextSiblingElement()) {
      int val;

      ASSERT_STREQ("alloc", element->Name());
      ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("size", &val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("count", &val));
    }
  } else {
    // Do not verify output for debug malloc.
    ASSERT_TRUE(version == "debug-malloc-1") << "Unknown version: " << version;
  }
#endif
}
470
TEST(malloc, malloc_info_matches_mallinfo) {
#ifdef __BIONIC__
  SKIP_WITH_HWASAN; // hwasan does not implement malloc_info

  // Capture malloc_info output and cross-check the per-arena totals it
  // reports against mallinfo().uordblks sampled before and after the call.
  TemporaryFile tf;
  ASSERT_TRUE(tf.fd != -1);
  FILE* fp = fdopen(tf.fd, "w+");
  // fdopen took ownership of the fd; fclose below will close it.
  tf.release();
  ASSERT_TRUE(fp != nullptr);
  size_t mallinfo_before_allocated_bytes = mallinfo().uordblks;
  ASSERT_EQ(0, malloc_info(0, fp));
  size_t mallinfo_after_allocated_bytes = mallinfo().uordblks;
  ASSERT_EQ(0, fclose(fp));

  std::string contents;
  ASSERT_TRUE(android::base::ReadFileToString(tf.path, &contents));

  tinyxml2::XMLDocument doc;
  ASSERT_EQ(tinyxml2::XML_SUCCESS, doc.Parse(contents.c_str()));

  size_t total_allocated_bytes = 0;
  auto root = doc.FirstChildElement();
  ASSERT_NE(nullptr, root);
  ASSERT_STREQ("malloc", root->Name());
  std::string version(root->Attribute("version"));
  if (version == "jemalloc-1") {
    // Sum allocated-large + allocated-huge + allocated-bins over all arenas.
    auto arena = root->FirstChildElement();
    for (; arena != nullptr; arena = arena->NextSiblingElement()) {
      int val;

      ASSERT_STREQ("heap", arena->Name());
      ASSERT_EQ(tinyxml2::XML_SUCCESS, arena->QueryIntAttribute("nr", &val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-large")->QueryIntText(&val));
      total_allocated_bytes += val;
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-huge")->QueryIntText(&val));
      total_allocated_bytes += val;
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-bins")->QueryIntText(&val));
      total_allocated_bytes += val;
      // bins-total is read for validity but not counted in the total.
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("bins-total")->QueryIntText(&val));
    }
    // The total needs to be between the mallinfo call before and after
    // since malloc_info allocates some memory.
    EXPECT_LE(mallinfo_before_allocated_bytes, total_allocated_bytes);
    EXPECT_GE(mallinfo_after_allocated_bytes, total_allocated_bytes);
  } else if (version == "scudo-1") {
    // Sum size * count over every <alloc> element.
    auto element = root->FirstChildElement();
    for (; element != nullptr; element = element->NextSiblingElement()) {
      ASSERT_STREQ("alloc", element->Name());
      int size;
      ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("size", &size));
      int count;
      ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("count", &count));
      total_allocated_bytes += size * count;
    }
    // Scudo only gives the information on the primary, so simply make
    // sure that the value is non-zero.
    EXPECT_NE(0U, total_allocated_bytes);
  } else {
    // Do not verify output for debug malloc.
    ASSERT_TRUE(version == "debug-malloc-1") << "Unknown version: " << version;
  }
#endif
}
538
TEST(malloc, calloc_usable_size) {
  // Dirty a freshly freed allocation, then verify that a calloc of the same
  // size returns fully zeroed memory even if the allocator reuses the block.
  for (size_t size = 1; size <= 2048; size++) {
    void* pointer = malloc(size);
    ASSERT_TRUE(pointer != nullptr);
    memset(pointer, 0xeb, malloc_usable_size(pointer));
    free(pointer);

    // We should get a previous pointer that has been set to non-zero.
    // If calloc does not zero out all of the data, this will fail.
    uint8_t* zero_mem = reinterpret_cast<uint8_t*>(calloc(1, size));
    // Fix: the original asserted on `pointer` (already freed) instead of the
    // calloc result, so a failed calloc would have gone undetected.
    ASSERT_TRUE(zero_mem != nullptr);
    size_t usable_size = malloc_usable_size(zero_mem);
    for (size_t i = 0; i < usable_size; i++) {
      ASSERT_EQ(0, zero_mem[i]) << "Failed at allocation size " << size << " at byte " << i;
    }
    free(zero_mem);
  }
}
557
TEST(malloc, malloc_0) {
  // A zero-byte malloc must still return a valid, freeable pointer.
  void* allocation = malloc(0);
  ASSERT_NE(nullptr, allocation);
  free(allocation);
}
563
TEST(malloc, calloc_0_0) {
  // calloc(0, 0) must still return a valid, freeable pointer.
  void* allocation = calloc(0, 0);
  ASSERT_NE(nullptr, allocation);
  free(allocation);
}
569
TEST(malloc, calloc_0_1) {
  // A zero element count must still yield a valid, freeable pointer.
  void* allocation = calloc(0, 1);
  ASSERT_NE(nullptr, allocation);
  free(allocation);
}
575
TEST(malloc, calloc_1_0) {
  // A zero element size must still yield a valid, freeable pointer.
  void* allocation = calloc(1, 0);
  ASSERT_NE(nullptr, allocation);
  free(allocation);
}
581
TEST(malloc, realloc_nullptr_0) {
  // realloc(nullptr, size) is actually malloc(size).
  void* allocation = realloc(nullptr, 0);
  ASSERT_NE(nullptr, allocation);
  free(allocation);
}
588
TEST(malloc, realloc_0) {
  void* allocation = malloc(1024);
  ASSERT_NE(nullptr, allocation);
  // realloc(p, 0) is actually free(p).
  void* shrunk = realloc(allocation, 0);
  ASSERT_EQ(nullptr, shrunk);
}
596
// Number of allocations made per data type below; small "filler"
// allocations are interleaved to perturb the allocator's placement.
constexpr size_t MAX_LOOPS = 200;

// Make sure that memory returned by malloc is aligned to allow these data types.
TEST(malloc, verify_alignment) {
  uint32_t** values_32 = new uint32_t*[MAX_LOOPS];
  uint64_t** values_64 = new uint64_t*[MAX_LOOPS];
  long double** values_ldouble = new long double*[MAX_LOOPS];
  // Use filler to attempt to force the allocator to get potentially bad alignments.
  void** filler = new void*[MAX_LOOPS];

  for (size_t i = 0; i < MAX_LOOPS; i++) {
    // Check uint32_t pointers.
    filler[i] = malloc(1);
    ASSERT_TRUE(filler[i] != nullptr);

    values_32[i] = reinterpret_cast<uint32_t*>(malloc(sizeof(uint32_t)));
    ASSERT_TRUE(values_32[i] != nullptr);
    // Write and read back to make sure the pointer is usable.
    *values_32[i] = i;
    ASSERT_EQ(*values_32[i], i);
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_32[i]) & (sizeof(uint32_t) - 1));

    free(filler[i]);
  }

  for (size_t i = 0; i < MAX_LOOPS; i++) {
    // Check uint64_t pointers.
    filler[i] = malloc(1);
    ASSERT_TRUE(filler[i] != nullptr);

    values_64[i] = reinterpret_cast<uint64_t*>(malloc(sizeof(uint64_t)));
    ASSERT_TRUE(values_64[i] != nullptr);
    *values_64[i] = 0x1000 + i;
    ASSERT_EQ(*values_64[i], 0x1000 + i);
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_64[i]) & (sizeof(uint64_t) - 1));

    free(filler[i]);
  }

  for (size_t i = 0; i < MAX_LOOPS; i++) {
    // Check long double pointers.
    filler[i] = malloc(1);
    ASSERT_TRUE(filler[i] != nullptr);

    values_ldouble[i] = reinterpret_cast<long double*>(malloc(sizeof(long double)));
    ASSERT_TRUE(values_ldouble[i] != nullptr);
    *values_ldouble[i] = 5.5 + i;
    ASSERT_DOUBLE_EQ(*values_ldouble[i], 5.5 + i);
    // 32 bit glibc has a long double size of 12 bytes, so hardcode the
    // required alignment to 0x7.
#if !defined(__BIONIC__) && !defined(__LP64__)
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_ldouble[i]) & 0x7);
#else
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_ldouble[i]) & (sizeof(long double) - 1));
#endif

    free(filler[i]);
  }

  // Release the typed allocations kept alive across the loops above.
  for (size_t i = 0; i < MAX_LOOPS; i++) {
    free(values_32[i]);
    free(values_64[i]);
    free(values_ldouble[i]);
  }

  delete[] filler;
  delete[] values_32;
  delete[] values_64;
  delete[] values_ldouble;
}
666
TEST(malloc, mallopt_smoke) {
#if defined(__BIONIC__)
  // An unknown mallopt parameter must be rejected.
  errno = 0;
  int result = mallopt(-1000, 1);
  ASSERT_EQ(0, result);
  // mallopt doesn't set errno.
  ASSERT_EQ(0, errno);
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
677
TEST(malloc, mallopt_decay) {
#if defined(__BIONIC__)
  SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
  errno = 0;
  // Toggle the decay-time setting back and forth; every call must succeed.
  for (int value : {1, 0, 1, 0}) {
    ASSERT_EQ(1, mallopt(M_DECAY_TIME, value));
  }
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
690
TEST(malloc, mallopt_purge) {
#if defined(__BIONIC__)
  SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
  errno = 0;
  // The purge request must be accepted by the allocator.
  int result = mallopt(M_PURGE, 0);
  ASSERT_EQ(1, result);
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
700
TEST(malloc, mallopt_purge_all) {
#if defined(__BIONIC__)
  SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
  errno = 0;
  // The purge-all request must be accepted by the allocator.
  int result = mallopt(M_PURGE_ALL, 0);
  ASSERT_EQ(1, result);
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
710
// Verify that all of the mallopt values are unique.
TEST(malloc, mallopt_unique_params) {
#if defined(__BIONIC__)
  // Every bionic mallopt parameter, paired with its name for diagnostics.
  std::vector<std::pair<int, std::string>> params{
      std::make_pair(M_DECAY_TIME, "M_DECAY_TIME"),
      std::make_pair(M_PURGE, "M_PURGE"),
      std::make_pair(M_PURGE_ALL, "M_PURGE_ALL"),
      std::make_pair(M_MEMTAG_TUNING, "M_MEMTAG_TUNING"),
      std::make_pair(M_THREAD_DISABLE_MEM_INIT, "M_THREAD_DISABLE_MEM_INIT"),
      std::make_pair(M_CACHE_COUNT_MAX, "M_CACHE_COUNT_MAX"),
      std::make_pair(M_CACHE_SIZE_MAX, "M_CACHE_SIZE_MAX"),
      std::make_pair(M_TSDS_COUNT_MAX, "M_TSDS_COUNT_MAX"),
      std::make_pair(M_BIONIC_ZERO_INIT, "M_BIONIC_ZERO_INIT"),
      std::make_pair(M_BIONIC_SET_HEAP_TAGGING_LEVEL, "M_BIONIC_SET_HEAP_TAGGING_LEVEL"),
  };

  // Insert each value into a map; a collision means two named params share
  // the same integer value, which the error message identifies by name.
  std::unordered_map<int, std::string> all_params;
  for (const auto& param : params) {
    EXPECT_TRUE(all_params.count(param.first) == 0)
        << "mallopt params " << all_params[param.first] << " and " << param.second
        << " have the same value " << param.first;
    all_params.insert(param);
  }
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
738
739 #if defined(__BIONIC__)
GetAllocatorVersion(bool * allocator_scudo)740 static void GetAllocatorVersion(bool* allocator_scudo) {
741 TemporaryFile tf;
742 ASSERT_TRUE(tf.fd != -1);
743 FILE* fp = fdopen(tf.fd, "w+");
744 tf.release();
745 ASSERT_TRUE(fp != nullptr);
746 if (malloc_info(0, fp) != 0) {
747 *allocator_scudo = false;
748 return;
749 }
750 ASSERT_EQ(0, fclose(fp));
751
752 std::string contents;
753 ASSERT_TRUE(android::base::ReadFileToString(tf.path, &contents));
754
755 tinyxml2::XMLDocument doc;
756 ASSERT_EQ(tinyxml2::XML_SUCCESS, doc.Parse(contents.c_str()));
757
758 auto root = doc.FirstChildElement();
759 ASSERT_NE(nullptr, root);
760 ASSERT_STREQ("malloc", root->Name());
761 std::string version(root->Attribute("version"));
762 *allocator_scudo = (version == "scudo-1");
763 }
764 #endif
765
TEST(malloc, mallopt_scudo_only_options) {
#if defined(__BIONIC__)
  SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
  bool allocator_scudo;
  GetAllocatorVersion(&allocator_scudo);
  if (!allocator_scudo) {
    GTEST_SKIP() << "scudo allocator only test";
  }
  // Each scudo-specific tunable must be accepted.
  ASSERT_EQ(1, mallopt(M_CACHE_COUNT_MAX, 100));
  ASSERT_EQ(1, mallopt(M_CACHE_SIZE_MAX, 2 * 1024 * 1024));
  ASSERT_EQ(1, mallopt(M_TSDS_COUNT_MAX, 8));
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
781
TEST(malloc, reallocarray_overflow) {
#if HAVE_REALLOCARRAY
  // Values that cause overflow to a result small enough (8 on LP64) that malloc would "succeed".
  size_t a = static_cast<size_t>(INTPTR_MIN + 4);
  size_t b = 2;

  // a * b wraps to a tiny value; reallocarray must detect the multiplication
  // overflow instead of forwarding the wrapped size to realloc.
  errno = 0;
  ASSERT_TRUE(reallocarray(nullptr, a, b) == nullptr);
  ASSERT_EQ(ENOMEM, errno);

  // The overflow check must be symmetric in its arguments.
  errno = 0;
  ASSERT_TRUE(reallocarray(nullptr, b, a) == nullptr);
  ASSERT_EQ(ENOMEM, errno);
#else
  GTEST_SKIP() << "reallocarray not available";
#endif
}
799
TEST(malloc, reallocarray) {
#if HAVE_REALLOCARRAY
  // reallocarray(nullptr, nmemb, size) behaves like malloc(nmemb * size).
  void* p = reallocarray(nullptr, 2, 32);
  ASSERT_TRUE(p != nullptr);
  ASSERT_GE(malloc_usable_size(p), 64U);
  // Fix: release the allocation; the original test leaked it.
  free(p);
#else
  GTEST_SKIP() << "reallocarray not available";
#endif
}
809
TEST(malloc, mallinfo) {
#if defined(__BIONIC__) || defined(ANDROID_HOST_MUSL)
  SKIP_WITH_HWASAN << "hwasan does not implement mallinfo";
  // Allocation sizes spanning small through very large requests.
  static size_t sizes[] = {
    8, 32, 128, 4096, 32768, 131072, 1024000, 10240000, 20480000, 300000000
  };

  // Upper bound on retries per size before declaring failure.
  constexpr static size_t kMaxAllocs = 50;

  for (size_t size : sizes) {
    // If some of these allocations are stuck in a thread cache, then keep
    // looping until we make an allocation that changes the total size of the
    // memory allocated.
    // jemalloc implementations counts the thread cache allocations against
    // total memory allocated.
    void* ptrs[kMaxAllocs] = {};
    bool pass = false;
    for (size_t i = 0; i < kMaxAllocs; i++) {
      size_t allocated = mallinfo().uordblks;
      ptrs[i] = malloc(size);
      ASSERT_TRUE(ptrs[i] != nullptr);
      size_t new_allocated = mallinfo().uordblks;
      if (allocated != new_allocated) {
        size_t usable_size = malloc_usable_size(ptrs[i]);
        // Only check if the total got bigger by at least allocation size.
        // Sometimes the mallinfo numbers can go backwards due to compaction
        // and/or freeing of cached data.
        if (new_allocated >= allocated + usable_size) {
          pass = true;
          break;
        }
      }
    }
    // Free everything allocated for this size (unused slots are nullptr,
    // which free() accepts).
    for (void* ptr : ptrs) {
      free(ptr);
    }
    ASSERT_TRUE(pass)
        << "For size " << size << " allocated bytes did not increase after "
        << kMaxAllocs << " allocations.";
  }
#else
  GTEST_SKIP() << "glibc is broken";
#endif
}
854
TEST(malloc, mallinfo2) {
#if defined(__BIONIC__) || defined(ANDROID_HOST_MUSL)
  SKIP_WITH_HWASAN << "hwasan does not implement mallinfo2";
  // Allocation sizes spanning small through very large requests.
  static size_t sizes[] = {8, 32, 128, 4096, 32768, 131072, 1024000, 10240000, 20480000, 300000000};

  // Upper bound on retries per size before declaring failure.
  constexpr static size_t kMaxAllocs = 50;

  for (size_t size : sizes) {
    // If some of these allocations are stuck in a thread cache, then keep
    // looping until we make an allocation that changes the total size of the
    // memory allocated.
    // jemalloc implementations counts the thread cache allocations against
    // total memory allocated.
    void* ptrs[kMaxAllocs] = {};
    bool pass = false;
    for (size_t i = 0; i < kMaxAllocs; i++) {
      struct mallinfo info = mallinfo();
      struct mallinfo2 info2 = mallinfo2();
      // Verify that mallinfo and mallinfo2 are exactly the same.
      ASSERT_EQ(static_cast<size_t>(info.arena), info2.arena);
      ASSERT_EQ(static_cast<size_t>(info.ordblks), info2.ordblks);
      ASSERT_EQ(static_cast<size_t>(info.smblks), info2.smblks);
      ASSERT_EQ(static_cast<size_t>(info.hblks), info2.hblks);
      ASSERT_EQ(static_cast<size_t>(info.hblkhd), info2.hblkhd);
      ASSERT_EQ(static_cast<size_t>(info.usmblks), info2.usmblks);
      ASSERT_EQ(static_cast<size_t>(info.fsmblks), info2.fsmblks);
      ASSERT_EQ(static_cast<size_t>(info.uordblks), info2.uordblks);
      ASSERT_EQ(static_cast<size_t>(info.fordblks), info2.fordblks);
      ASSERT_EQ(static_cast<size_t>(info.keepcost), info2.keepcost);

      size_t allocated = info2.uordblks;
      ptrs[i] = malloc(size);
      ASSERT_TRUE(ptrs[i] != nullptr);

      // Re-sample after the allocation and re-check field-by-field equality.
      info = mallinfo();
      info2 = mallinfo2();
      // Verify that mallinfo and mallinfo2 are exactly the same.
      ASSERT_EQ(static_cast<size_t>(info.arena), info2.arena);
      ASSERT_EQ(static_cast<size_t>(info.ordblks), info2.ordblks);
      ASSERT_EQ(static_cast<size_t>(info.smblks), info2.smblks);
      ASSERT_EQ(static_cast<size_t>(info.hblks), info2.hblks);
      ASSERT_EQ(static_cast<size_t>(info.hblkhd), info2.hblkhd);
      ASSERT_EQ(static_cast<size_t>(info.usmblks), info2.usmblks);
      ASSERT_EQ(static_cast<size_t>(info.fsmblks), info2.fsmblks);
      ASSERT_EQ(static_cast<size_t>(info.uordblks), info2.uordblks);
      ASSERT_EQ(static_cast<size_t>(info.fordblks), info2.fordblks);
      ASSERT_EQ(static_cast<size_t>(info.keepcost), info2.keepcost);

      size_t new_allocated = info2.uordblks;
      if (allocated != new_allocated) {
        size_t usable_size = malloc_usable_size(ptrs[i]);
        // Only check if the total got bigger by at least allocation size.
        // Sometimes the mallinfo2 numbers can go backwards due to compaction
        // and/or freeing of cached data.
        if (new_allocated >= allocated + usable_size) {
          pass = true;
          break;
        }
      }
    }
    // Free everything allocated for this size (unused slots are nullptr,
    // which free() accepts).
    for (void* ptr : ptrs) {
      free(ptr);
    }
    ASSERT_TRUE(pass) << "For size " << size << " allocated bytes did not increase after "
                      << kMaxAllocs << " allocations.";
  }
#else
  GTEST_SKIP() << "glibc is broken";
#endif
}
925
// Asserts that `floating` satisfies alignof(Type). optnone keeps the
// compiler from constant-folding the check away.
template <typename Type>
void __attribute__((optnone)) VerifyAlignment(Type* floating) {
  size_t expected_alignment = alignof(Type);
  if (expected_alignment != 0) {
    ASSERT_EQ(0U, (expected_alignment - 1) & reinterpret_cast<uintptr_t>(floating))
        << "Expected alignment " << expected_alignment << " ptr value " << floating;
  }
}
934
// Allocates many objects of Type via operator new, malloc, and std::vector,
// checking that every resulting pointer satisfies alignof(Type). Returns
// early on the first fatal failure. optnone keeps the allocations and
// checks from being optimized away.
template <typename Type>
void __attribute__((optnone)) TestAllocateType() {
  // The number of allocations to do in a row. This is to attempt to
  // expose the worst case alignment for native allocators that use
  // bins.
  static constexpr size_t kMaxConsecutiveAllocs = 100;

  // Verify using new directly.
  Type* types[kMaxConsecutiveAllocs];
  for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
    types[i] = new Type;
    VerifyAlignment(types[i]);
    if (::testing::Test::HasFatalFailure()) {
      return;
    }
  }
  for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
    delete types[i];
  }

  // Verify using malloc.
  for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
    types[i] = reinterpret_cast<Type*>(malloc(sizeof(Type)));
    ASSERT_TRUE(types[i] != nullptr);
    VerifyAlignment(types[i]);
    if (::testing::Test::HasFatalFailure()) {
      return;
    }
  }
  for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
    free(types[i]);
  }

  // Verify using a vector.
  std::vector<Type> type_vector(kMaxConsecutiveAllocs);
  for (size_t i = 0; i < type_vector.size(); i++) {
    VerifyAlignment(&type_vector[i]);
    if (::testing::Test::HasFatalFailure()) {
      return;
    }
  }
}
977
#if defined(__ANDROID__)
// Allocate |alloc_size| bytes 100 times in a row and assert every returned
// pointer is aligned to at least |aligned_bytes| (must be a power of two).
// NOTE: the pointers are deliberately never freed here, matching the way
// the alignment sweep in AlignCheck() drives this helper.
static void __attribute__((optnone)) AndroidVerifyAlignment(size_t alloc_size, size_t aligned_bytes) {
  void* allocs[100];
  const uintptr_t mask = aligned_bytes - 1;
  for (auto& alloc : allocs) {
    alloc = malloc(alloc_size);
    ASSERT_TRUE(alloc != nullptr);
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(alloc) & mask)
        << "Expected at least " << aligned_bytes << " byte alignment: size "
        << alloc_size << " actual ptr " << alloc;
  }
}
#endif
991
// Checks allocation alignment for every fundamental type, and (on Android)
// the minimum alignment of raw malloc for all sizes from 1 to 128 bytes.
void AlignCheck() {
  // See http://www.open-std.org/jtc1/sc22/wg14/www/docs/summary.htm#dr_445
  // for a discussion of type alignment.
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<float>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<double>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<long double>());

  ASSERT_NO_FATAL_FAILURE(TestAllocateType<char>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<char16_t>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<char32_t>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<wchar_t>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<signed char>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<short int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<long int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<long long int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned char>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned short int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned long int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned long long int>());

#if defined(__ANDROID__)
  // On Android, there is a lot of code that expects certain alignments:
  // 1. Allocations of a size that rounds up to a multiple of 16 bytes
  //    must have at least 16 byte alignment.
  // 2. Allocations of a size that rounds up to a multiple of 8 bytes and
  //    not 16 bytes, are only required to have at least 8 byte alignment.
  // In addition, on Android clang has been configured for 64 bit such that:
  // 3. Allocations <= 8 bytes must be aligned to at least 8 bytes.
  // 4. Allocations > 8 bytes must be aligned to at least 16 bytes.
  // For 32 bit environments, only the first two requirements must be met.

  // See http://www.open-std.org/jtc1/sc22/wg14/www/docs/n2293.htm for
  // a discussion of this alignment mess. The code below is enforcing
  // strong-alignment, since who knows what code depends on this behavior now.
  // As mentioned before, for 64 bit this will enforce the higher
  // requirement since clang expects this behavior on Android now.
  for (size_t i = 1; i <= 128; i++) {
#if defined(__LP64__)
    if (i <= 8) {
      AndroidVerifyAlignment(i, 8);
    } else {
      AndroidVerifyAlignment(i, 16);
    }
#else
    // Round the requested size up to the next multiple of 8, then apply
    // requirements 1 and 2 above.
    size_t rounded = (i + 7) & ~7;
    if ((rounded % 16) == 0) {
      AndroidVerifyAlignment(i, 16);
    } else {
      AndroidVerifyAlignment(i, 8);
    }
#endif
    if (::testing::Test::HasFatalFailure()) {
      return;
    }
  }
#endif
}
1051
// Verify allocator alignment guarantees for all fundamental types (and raw
// malloc sizes on Android); see AlignCheck() above.
TEST(malloc, align_check) {
  AlignCheck();
}
1055
1056 // Jemalloc doesn't pass this test right now, so leave it as disabled.
TEST(malloc,DISABLED_alloc_after_fork)1057 TEST(malloc, DISABLED_alloc_after_fork) {
1058 // Both of these need to be a power of 2.
1059 static constexpr size_t kMinAllocationSize = 8;
1060 static constexpr size_t kMaxAllocationSize = 2097152;
1061
1062 static constexpr size_t kNumAllocatingThreads = 5;
1063 static constexpr size_t kNumForkLoops = 100;
1064
1065 std::atomic_bool stop;
1066
1067 // Create threads that simply allocate and free different sizes.
1068 std::vector<std::thread*> threads;
1069 for (size_t i = 0; i < kNumAllocatingThreads; i++) {
1070 std::thread* t = new std::thread([&stop] {
1071 while (!stop) {
1072 for (size_t size = kMinAllocationSize; size <= kMaxAllocationSize; size <<= 1) {
1073 void* ptr;
1074 DoNotOptimize(ptr = malloc(size));
1075 free(ptr);
1076 }
1077 }
1078 });
1079 threads.push_back(t);
1080 }
1081
1082 // Create a thread to fork and allocate.
1083 for (size_t i = 0; i < kNumForkLoops; i++) {
1084 pid_t pid;
1085 if ((pid = fork()) == 0) {
1086 for (size_t size = kMinAllocationSize; size <= kMaxAllocationSize; size <<= 1) {
1087 void* ptr;
1088 DoNotOptimize(ptr = malloc(size));
1089 ASSERT_TRUE(ptr != nullptr);
1090 // Make sure we can touch all of the allocation.
1091 memset(ptr, 0x1, size);
1092 ASSERT_LE(size, malloc_usable_size(ptr));
1093 free(ptr);
1094 }
1095 _exit(10);
1096 }
1097 ASSERT_NE(-1, pid);
1098 AssertChildExited(pid, 10);
1099 }
1100
1101 stop = true;
1102 for (auto thread : threads) {
1103 thread->join();
1104 delete thread;
1105 }
1106 }
1107
// An unrecognized android_mallopt() opcode must fail and set errno to
// ENOTSUP.
TEST(android_mallopt, error_on_unexpected_option) {
#if defined(__BIONIC__)
  const int unrecognized_option = -1;
  errno = 0;
  EXPECT_EQ(false, android_mallopt(unrecognized_option, nullptr, 0));
  EXPECT_EQ(ENOTSUP, errno);
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
1118
IsDynamic()1119 bool IsDynamic() {
1120 #if defined(__LP64__)
1121 Elf64_Ehdr ehdr;
1122 #else
1123 Elf32_Ehdr ehdr;
1124 #endif
1125 std::string path(android::base::GetExecutablePath());
1126
1127 int fd = open(path.c_str(), O_RDONLY | O_CLOEXEC);
1128 if (fd == -1) {
1129 // Assume dynamic on error.
1130 return true;
1131 }
1132 bool read_completed = android::base::ReadFully(fd, &ehdr, sizeof(ehdr));
1133 close(fd);
1134 // Assume dynamic in error cases.
1135 return !read_completed || ehdr.e_type == ET_DYN;
1136 }
1137
// M_INIT_ZYGOTE_CHILD_PROFILING succeeds (errno 0) in dynamic executables,
// fails with ENOTSUP in static ones, and rejects any extra arguments with
// EINVAL (dynamic) / ENOTSUP (static).
TEST(android_mallopt, init_zygote_child_profiling) {
#if defined(__BIONIC__)
  // Successful call.
  errno = 0;
  if (IsDynamic()) {
    EXPECT_EQ(true, android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, nullptr, 0));
    EXPECT_EQ(0, errno);
  } else {
    // Not supported in static executables.
    EXPECT_EQ(false, android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, nullptr, 0));
    EXPECT_EQ(ENOTSUP, errno);
  }

  // Unexpected arguments rejected.
  errno = 0;
  char unexpected = 0;
  EXPECT_EQ(false, android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, &unexpected, 1));
  if (IsDynamic()) {
    EXPECT_EQ(EINVAL, errno);
  } else {
    EXPECT_EQ(ENOTSUP, errno);
  }
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
1164
#if defined(__BIONIC__)
// Runs in a child process (via EXPECT_EXIT): sets a 128MB allocation limit,
// then checks that |func| succeeds for an allocation below the limit and
// fails for one at the limit. Exits 0 on success, 1 on failure.
template <typename FuncType>
void CheckAllocationFunction(FuncType func) {
  // Assumes that no more than 108MB of memory is allocated before this.
  size_t limit = 128 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
  if (!func(20 * 1024 * 1024))
    exit(1);
  if (func(128 * 1024 * 1024))
    exit(1);
  exit(0);
}
#endif
1178
// Each allocation entry point must honor M_SET_ALLOCATION_LIMIT_BYTES. Every
// lambda runs in a forked child (EXPECT_EXIT) because the limit can only be
// set once per process (see set_allocation_limit_multiple below).
TEST(android_mallopt, set_allocation_limit) {
#if defined(__BIONIC__)
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return calloc(bytes, 1) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return calloc(1, bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return malloc(bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction(
                  [](size_t bytes) { return memalign(sizeof(void*), bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) {
                void* ptr;
                return posix_memalign(&ptr, sizeof(void *), bytes) == 0;
              }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction(
                  [](size_t bytes) { return aligned_alloc(sizeof(void*), bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) {
                void* p = malloc(1024 * 1024);
                return realloc(p, bytes) != nullptr;
              }),
              testing::ExitedWithCode(0), "");
  // pvalloc/valloc are only provided for 32-bit builds.
#if !defined(__LP64__)
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return pvalloc(bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return valloc(bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
#endif
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1213
// The allocation limit can only be set once per process; a second call must
// fail and leave the first limit in place.
TEST(android_mallopt, set_allocation_limit_multiple) {
#if defined(__BIONIC__)
  // Only the first set should work.
  size_t limit = 256 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
  limit = 32 * 1024 * 1024;
  ASSERT_FALSE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1225
1226 #if defined(__BIONIC__)
static constexpr size_t kAllocationSize = 8 * 1024 * 1024;

// Keeps allocating kAllocationSize chunks until malloc fails (i.e. the limit
// set via M_SET_ALLOCATION_LIMIT_BYTES is hit), frees everything, and returns
// how many allocations succeeded before the failure. Returns 0 if the limit
// was never reached within the attempted allocations.
static size_t GetMaxAllocations() {
  size_t max_pointers = 0;
  void* ptrs[20];
  // Track how many entries were actually allocated so they can all be freed.
  // The original code only freed max_pointers entries, which leaked every
  // allocation when the limit was never hit (max_pointers == 0).
  size_t allocated = 0;
  for (size_t i = 0; i < sizeof(ptrs) / sizeof(void*); i++) {
    ptrs[i] = malloc(kAllocationSize);
    if (ptrs[i] == nullptr) {
      max_pointers = i;
      break;
    }
    allocated = i + 1;
  }
  for (size_t i = 0; i < allocated; i++) {
    free(ptrs[i]);
  }
  return max_pointers;
}
1244
VerifyMaxPointers(size_t max_pointers)1245 static void VerifyMaxPointers(size_t max_pointers) {
1246 // Now verify that we can allocate the same number as before.
1247 void* ptrs[20];
1248 for (size_t i = 0; i < max_pointers; i++) {
1249 ptrs[i] = malloc(kAllocationSize);
1250 ASSERT_TRUE(ptrs[i] != nullptr) << "Failed to allocate on iteration " << i;
1251 }
1252
1253 // Make sure the next allocation still fails.
1254 ASSERT_TRUE(malloc(kAllocationSize) == nullptr);
1255 for (size_t i = 0; i < max_pointers; i++) {
1256 free(ptrs[i]);
1257 }
1258 }
1259 #endif
1260
// Growing an allocation with realloc must be charged against the allocation
// limit; growing past the limit must fail.
TEST(android_mallopt, set_allocation_limit_realloc_increase) {
#if defined(__BIONIC__)
  size_t limit = 128 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));

  size_t max_pointers = GetMaxAllocations();
  ASSERT_TRUE(max_pointers != 0) << "Limit never reached.";

  void* memory = malloc(10 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);

  // Increase size.
  memory = realloc(memory, 20 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);
  memory = realloc(memory, 40 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);
  memory = realloc(memory, 60 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);
  memory = realloc(memory, 80 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);
  // Now push past limit.
  memory = realloc(memory, 130 * 1024 * 1024);
  ASSERT_TRUE(memory == nullptr);
  // NOTE(review): the failed realloc overwrites |memory|, dropping the last
  // successful 80MB pointer without freeing it, yet VerifyMaxPointers below
  // expects the full headroom back — presumably the limit implementation
  // releases that block on failure; confirm against the bionic dispatcher.

  VerifyMaxPointers(max_pointers);
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1290
// Shrinking an allocation with realloc must stay within the allocation
// limit and give the freed bytes back to the limit accounting.
TEST(android_mallopt, set_allocation_limit_realloc_decrease) {
#if defined(__BIONIC__)
  size_t limit = 100 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));

  size_t max_pointers = GetMaxAllocations();
  ASSERT_TRUE(max_pointers != 0) << "Limit never reached.";

  void* memory = malloc(80 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);

  // Shrink the allocation in steps; every realloc should succeed since the
  // total stays under the limit.
  for (size_t new_size : {60 * 1024 * 1024, 40 * 1024 * 1024,
                          20 * 1024 * 1024, 10 * 1024 * 1024}) {
    memory = realloc(memory, new_size);
    ASSERT_TRUE(memory != nullptr);
  }
  free(memory);

  VerifyMaxPointers(max_pointers);
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1318
// realloc(ptr, 0) must free the allocation and return its bytes to the
// allocation-limit accounting.
TEST(android_mallopt, set_allocation_limit_realloc_free) {
#if defined(__BIONIC__)
  size_t limit = 100 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));

  size_t max_pointers = GetMaxAllocations();
  ASSERT_TRUE(max_pointers != 0) << "Limit never reached.";

  void* memory = malloc(60 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);

  // On bionic, realloc to size 0 frees the pointer and returns nullptr.
  memory = realloc(memory, 0);
  ASSERT_TRUE(memory == nullptr);

  VerifyMaxPointers(max_pointers);
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1338
1339 #if defined(__BIONIC__)
// Thread entry point: spins until the start flag pointed to by |data|
// becomes true, then races to set the allocation limit. Returns non-null
// iff this thread's android_mallopt() call was the one that succeeded.
static void* SetAllocationLimit(void* data) {
  std::atomic_bool* go = reinterpret_cast<std::atomic_bool*>(data);
  // Busy-wait so all racing threads issue the mallopt call as close
  // together as possible.
  while (!go->load()) {
  }
  size_t limit = 500 * 1024 * 1024;
  if (android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit))) {
    return reinterpret_cast<void*>(-1);
  }
  return nullptr;
}
1350
// Forked-process body for the stress test below: races kNumThreads threads
// to set the allocation limit while also firing the heapprofd signal, then
// checks that exactly one thread won the race. Exits 0 on success so the
// parent can verify via wait().
static void SetAllocationLimitMultipleThreads() {
  std::atomic_bool go;
  go = false;

  static constexpr size_t kNumThreads = 4;
  pthread_t threads[kNumThreads];
  for (size_t i = 0; i < kNumThreads; i++) {
    ASSERT_EQ(0, pthread_create(&threads[i], nullptr, SetAllocationLimit, &go));
  }

  // Let them go all at once.
  go = true;
  // Send hardcoded signal (BIONIC_SIGNAL_PROFILER with value 0) to trigger
  // heapprofd handler.
  union sigval signal_value;
  signal_value.sival_int = 0;
  ASSERT_EQ(0, sigqueue(getpid(), BIONIC_SIGNAL_PROFILER, signal_value));

  size_t num_successful = 0;
  for (size_t i = 0; i < kNumThreads; i++) {
    void* result;
    ASSERT_EQ(0, pthread_join(threads[i], &result));
    // SetAllocationLimit returns non-null iff its mallopt call succeeded.
    if (result != nullptr) {
      num_successful++;
    }
  }
  // The limit can only be set once per process, so exactly one thread may win.
  ASSERT_EQ(1U, num_successful);
  exit(0);
}
1380 #endif
1381
// Stress test: setting the allocation limit must behave correctly when raced
// from multiple threads while the heapprofd signal handler is triggered.
// Each iteration runs in a forked child so the once-only limit can be set
// fresh every time.
TEST(android_mallopt, set_allocation_limit_multiple_threads) {
#if defined(__BIONIC__)
  if (IsDynamic()) {
    ASSERT_TRUE(android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, nullptr, 0));
  }

  // Run this a number of times as a stress test.
  for (size_t i = 0; i < 100; i++) {
    // Not using ASSERT_EXIT because errors messages are not displayed.
    pid_t pid;
    if ((pid = fork()) == 0) {
      ASSERT_NO_FATAL_FAILURE(SetAllocationLimitMultipleThreads());
    }
    ASSERT_NE(-1, pid);
    int status;
    ASSERT_EQ(pid, wait(&status));
    ASSERT_EQ(0, WEXITSTATUS(status));
  }
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1404
#if defined(__BIONIC__)
using Action = android_mallopt_gwp_asan_options_t::Action;

// Run indirectly by the multiple_enable_gwp_asan test below, in a process
// where GWP-ASan was force-enabled; re-initializing with any desire must
// still succeed.
TEST(android_mallopt, DISABLED_multiple_enable_gwp_asan) {
  android_mallopt_gwp_asan_options_t options;
  options.program_name = "";  // Don't infer GWP-ASan options from sysprops.
  options.desire = Action::DONT_TURN_ON_UNLESS_OVERRIDDEN;
  // GWP-ASan should already be enabled. Trying to enable or disable it should
  // always pass.
  ASSERT_TRUE(android_mallopt(M_INITIALIZE_GWP_ASAN, &options, sizeof(options)));
  options.desire = Action::TURN_ON_WITH_SAMPLING;
  ASSERT_TRUE(android_mallopt(M_INITIALIZE_GWP_ASAN, &options, sizeof(options)));
}
#endif  // defined(__BIONIC__)
1418
// Spawns a subprocess with GWP-ASan force-enabled and runs the corresponding
// DISABLED_ test there.
TEST(android_mallopt, multiple_enable_gwp_asan) {
#if defined(__BIONIC__)
  // Always enable GWP-Asan, with default options.
  RunGwpAsanTest("*.DISABLED_multiple_enable_gwp_asan");
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1427
// M_MEMTAG_STACK_IS_ON must accept a bool out-parameter and succeed,
// regardless of whether stack MTE is actually enabled (the value itself is
// not checked here).
TEST(android_mallopt, memtag_stack_is_on) {
#if defined(__BIONIC__)
  bool memtag_stack;  // Out-parameter, filled in by android_mallopt.
  EXPECT_TRUE(android_mallopt(M_MEMTAG_STACK_IS_ON, &memtag_stack, sizeof(memtag_stack)));
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1436
TestHeapZeroing(int num_iterations,int (* get_alloc_size)(int iteration))1437 void TestHeapZeroing(int num_iterations, int (*get_alloc_size)(int iteration)) {
1438 std::vector<void*> allocs;
1439 constexpr int kMaxBytesToCheckZero = 64;
1440 const char kBlankMemory[kMaxBytesToCheckZero] = {};
1441
1442 for (int i = 0; i < num_iterations; ++i) {
1443 int size = get_alloc_size(i);
1444 allocs.push_back(malloc(size));
1445 memset(allocs.back(), 'X', std::min(size, kMaxBytesToCheckZero));
1446 }
1447
1448 for (void* alloc : allocs) {
1449 free(alloc);
1450 }
1451 allocs.clear();
1452
1453 for (int i = 0; i < num_iterations; ++i) {
1454 int size = get_alloc_size(i);
1455 allocs.push_back(malloc(size));
1456 ASSERT_EQ(0, memcmp(allocs.back(), kBlankMemory, std::min(size, kMaxBytesToCheckZero)));
1457 }
1458
1459 for (void* alloc : allocs) {
1460 free(alloc);
1461 }
1462 }
1463
// With M_BIONIC_ZERO_INIT enabled, freed-then-reallocated memory must come
// back zeroed. Scudo-only: other allocators don't implement this mallopt.
TEST(malloc, zero_init) {
#if defined(__BIONIC__)
  SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
  bool allocator_scudo;
  GetAllocatorVersion(&allocator_scudo);
  if (!allocator_scudo) {
    GTEST_SKIP() << "scudo allocator only test";
  }

  mallopt(M_BIONIC_ZERO_INIT, 1);

  // Test using a block of 4K small (1-32 byte) allocations.
  TestHeapZeroing(/* num_iterations */ 0x1000, [](int iteration) -> int {
    return 1 + iteration % 32;
  });

  // Also test large allocations that land in the scudo secondary, as this is
  // the only part of Scudo that's changed by enabling zero initialization with
  // MTE. Uses 32 allocations, totalling 60MiB memory. Decay time (time to
  // release secondary allocations back to the OS) was modified to 0ms/1ms by
  // mallopt_decay. Ensure that we delay for at least a second before releasing
  // pages to the OS in order to avoid implicit zeroing by the kernel.
  mallopt(M_DECAY_TIME, 1000);
  TestHeapZeroing(/* num_iterations */ 32, [](int iteration) -> int {
    return 1 << (19 + iteration % 4);
  });

#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
1495
1496 // Note that MTE is enabled on cc_tests on devices that support MTE.
// Disabling heap tagging must drop the MTE tag-check-fault mode to TCF_NONE
// on every thread, not just the calling one.
TEST(malloc, disable_mte) {
#if defined(__BIONIC__)
  if (!mte_supported()) {
    GTEST_SKIP() << "This function can only be tested with MTE";
  }

  // The helper thread blocks on this semaphore so that it reads its
  // tagged-address state only after the main thread has disabled tagging.
  sem_t sem;
  ASSERT_EQ(0, sem_init(&sem, 0, 0));

  pthread_t thread;
  ASSERT_EQ(0, pthread_create(
                   &thread, nullptr,
                   [](void* ptr) -> void* {
                     auto* sem = reinterpret_cast<sem_t*>(ptr);
                     sem_wait(sem);
                     // Smuggle the prctl result out through the thread's
                     // return value.
                     return reinterpret_cast<void*>(prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0));
                   },
                   &sem));

  // Disable tagging, then release the helper thread.
  ASSERT_EQ(1, mallopt(M_BIONIC_SET_HEAP_TAGGING_LEVEL, M_HEAP_TAGGING_LEVEL_NONE));
  ASSERT_EQ(0, sem_post(&sem));

  int my_tagged_addr_ctrl = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
  ASSERT_EQ(static_cast<unsigned long>(PR_MTE_TCF_NONE), my_tagged_addr_ctrl & PR_MTE_TCF_MASK);

  // The helper thread must observe the same (disabled) state.
  void* retval;
  ASSERT_EQ(0, pthread_join(thread, &retval));
  int thread_tagged_addr_ctrl = reinterpret_cast<uintptr_t>(retval);
  ASSERT_EQ(my_tagged_addr_ctrl, thread_tagged_addr_ctrl);
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1530
// For apps targeting SDK <= 29, scudo must tolerate reading a few bytes off
// the end of a large allocation (legacy "slack" behavior).
TEST(malloc, allocation_slack) {
#if defined(__BIONIC__)
  SKIP_WITH_NATIVE_BRIDGE;  // http://b/189606147

  bool allocator_scudo;
  GetAllocatorVersion(&allocator_scudo);
  if (!allocator_scudo) {
    GTEST_SKIP() << "scudo allocator only test";
  }

  // Test that older target SDK levels let you access a few bytes off the end of
  // a large allocation.
  android_set_application_target_sdk_version(29);
  auto p = std::make_unique<char[]>(131072);
  volatile char *vp = p.get();
  // Read one byte past the end; must not fault under the legacy behavior.
  volatile char oob ATTRIBUTE_UNUSED = vp[131072];
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1551
1552 // Regression test for b/206701345 -- scudo bug, MTE only.
1553 // Fix: https://reviews.llvm.org/D105261
1554 // Fix: https://android-review.googlesource.com/c/platform/external/scudo/+/1763655
TEST(malloc, realloc_mte_crash_b206701345) {
  // We want to hit in-place realloc at the very end of an mmap-ed region. Not
  // all size classes allow such placement - mmap size has to be divisible by
  // the block size. At the time of writing this could only be reproduced with
  // 64 byte size class (i.e. 48 byte allocations), but that may change in the
  // future. Try several different classes at the lower end.
  constexpr size_t kNumPointers = 10000;
  std::vector<void*> ptrs(kNumPointers);
  for (int size_class = 1; size_class < 32; ++size_class) {
    const size_t alloc_size = 16 * size_class - 1;
    // Grow each allocation by one byte; the bug was a crash in scudo's
    // in-place realloc path under MTE.
    for (size_t idx = 0; idx < ptrs.size(); ++idx) {
      ptrs[idx] = realloc(malloc(alloc_size), alloc_size + 1);
    }

    for (void* ptr : ptrs) {
      free(ptr);
    }
  }
}
1573
// Checks that |alloc_func| returns zeroed memory for each size in
// |test_sizes|, doing |max_allocations| allocations per size. Each block is
// dirtied, freed, and the sizes are allocated again so that reused blocks
// are also verified to be zeroed. |function_name| is only used in the
// failure trace.
void VerifyAllocationsAreZero(std::function<void*(size_t)> alloc_func, std::string function_name,
                              std::vector<size_t>& test_sizes, size_t max_allocations) {
  // Vector of zero'd data used for comparisons. Make it twice the largest size.
  std::vector<char> zero(test_sizes.back() * 2, 0);

  SCOPED_TRACE(testing::Message() << function_name << " failed to zero memory");

  for (size_t test_size : test_sizes) {
    std::vector<void*> ptrs(max_allocations);
    for (size_t i = 0; i < ptrs.size(); i++) {
      SCOPED_TRACE(testing::Message() << "size " << test_size << " at iteration " << i);
      ptrs[i] = alloc_func(test_size);
      ASSERT_TRUE(ptrs[i] != nullptr);
      size_t alloc_size = malloc_usable_size(ptrs[i]);
      ASSERT_LE(alloc_size, zero.size());
      ASSERT_EQ(0, memcmp(ptrs[i], zero.data(), alloc_size));

      // Set the memory to non-zero to make sure if the pointer
      // is reused it's still zero.
      memset(ptrs[i], 0xab, alloc_size);
    }
    // Free the pointers.
    for (size_t i = 0; i < ptrs.size(); i++) {
      free(ptrs[i]);
    }
    // NOTE(review): this second pass uses plain malloc rather than
    // |alloc_func| — presumably intentional so reuse of the dirtied blocks
    // is also checked through the common malloc path; confirm.
    for (size_t i = 0; i < ptrs.size(); i++) {
      SCOPED_TRACE(testing::Message() << "size " << test_size << " at iteration " << i);
      ptrs[i] = malloc(test_size);
      ASSERT_TRUE(ptrs[i] != nullptr);
      size_t alloc_size = malloc_usable_size(ptrs[i]);
      ASSERT_LE(alloc_size, zero.size());
      ASSERT_EQ(0, memcmp(ptrs[i], zero.data(), alloc_size));
    }
    // Free all of the pointers later to maximize the chance of reusing from
    // the first loop.
    for (size_t i = 0; i < ptrs.size(); i++) {
      free(ptrs[i]);
    }
  }
}
1614
1615 // Verify that small and medium allocations are always zero.
1616 // @CddTest = 9.7/C-4-1
TEST(malloc, zeroed_allocations_small_medium_sizes) {
#if !defined(__BIONIC__)
  GTEST_SKIP() << "Only valid on bionic";
#endif

  if (IsLowRamDevice()) {
    GTEST_SKIP() << "Skipped on low memory devices.";
  }

  constexpr size_t kMaxAllocations = 1024;
  std::vector<size_t> test_sizes = {16, 48, 128, 1024, 4096, 65536};

  // Exercise every allocation entry point with the same size list.
  VerifyAllocationsAreZero([](size_t n) -> void* { return malloc(n); }, "malloc", test_sizes,
                           kMaxAllocations);

  VerifyAllocationsAreZero([](size_t n) -> void* { return memalign(64, n); }, "memalign",
                           test_sizes, kMaxAllocations);

  VerifyAllocationsAreZero(
      [](size_t n) -> void* {
        void* ptr = nullptr;
        return posix_memalign(&ptr, 64, n) == 0 ? ptr : nullptr;
      },
      "posix_memalign", test_sizes, kMaxAllocations);
}
1644
1645 // Verify that large allocations are always zero.
1646 // @CddTest = 9.7/C-4-1
TEST(malloc, zeroed_allocations_large_sizes) {
#if !defined(__BIONIC__)
  GTEST_SKIP() << "Only valid on bionic";
#endif

  if (IsLowRamDevice()) {
    GTEST_SKIP() << "Skipped on low memory devices.";
  }

  constexpr size_t kMaxAllocations = 20;
  std::vector<size_t> test_sizes = {1000000, 2000000, 3000000, 4000000};

  // Exercise every allocation entry point with the same size list.
  VerifyAllocationsAreZero([](size_t n) -> void* { return malloc(n); }, "malloc", test_sizes,
                           kMaxAllocations);

  VerifyAllocationsAreZero([](size_t n) -> void* { return memalign(64, n); }, "memalign",
                           test_sizes, kMaxAllocations);

  VerifyAllocationsAreZero(
      [](size_t n) -> void* {
        void* ptr = nullptr;
        return posix_memalign(&ptr, 64, n) == 0 ? ptr : nullptr;
      },
      "posix_memalign", test_sizes, kMaxAllocations);
}
1674
1675 // Verify that reallocs are zeroed when expanded.
1676 // @CddTest = 9.7/C-4-1
TEST(malloc, zeroed_allocations_realloc) {
#if !defined(__BIONIC__)
  GTEST_SKIP() << "Only valid on bionic";
#endif

  if (IsLowRamDevice()) {
    GTEST_SKIP() << "Skipped on low memory devices.";
  }

  // Vector of zero'd data used for comparisons.
  constexpr size_t kMaxMemorySize = 131072;
  std::vector<char> zero(kMaxMemorySize, 0);

  constexpr size_t kMaxAllocations = 1024;
  std::vector<size_t> test_sizes = {16, 48, 128, 1024, 4096, 65536};
  // Do a number of allocations and set them to non-zero. This dirties
  // memory the allocator may later hand back, so a realloc expansion that
  // fails to zero would be caught below.
  for (size_t test_size : test_sizes) {
    std::vector<void*> ptrs(kMaxAllocations);
    for (size_t i = 0; i < kMaxAllocations; i++) {
      ptrs[i] = malloc(test_size);
      ASSERT_TRUE(ptrs[i] != nullptr);

      // Set the memory to non-zero to make sure if the pointer
      // is reused it's still zero.
      memset(ptrs[i], 0xab, malloc_usable_size(ptrs[i]));
    }
    // Free the pointers.
    for (size_t i = 0; i < kMaxAllocations; i++) {
      free(ptrs[i]);
    }
  }

  // Do the reallocs to a larger size and verify the rest of the allocation
  // is zero.
  constexpr size_t kInitialSize = 8;
  for (size_t test_size : test_sizes) {
    std::vector<void*> ptrs(kMaxAllocations);
    for (size_t i = 0; i < kMaxAllocations; i++) {
      ptrs[i] = malloc(kInitialSize);
      ASSERT_TRUE(ptrs[i] != nullptr);
      size_t orig_alloc_size = malloc_usable_size(ptrs[i]);

      ptrs[i] = realloc(ptrs[i], test_size);
      ASSERT_TRUE(ptrs[i] != nullptr);
      size_t new_alloc_size = malloc_usable_size(ptrs[i]);
      // Only the bytes beyond the original usable size must be zero; the
      // first orig_alloc_size bytes are preserved by realloc.
      char* ptr = reinterpret_cast<char*>(ptrs[i]);
      ASSERT_EQ(0, memcmp(&ptr[orig_alloc_size], zero.data(), new_alloc_size - orig_alloc_size))
          << "realloc from " << kInitialSize << " to size " << test_size << " at iteration " << i;
    }
    for (size_t i = 0; i < kMaxAllocations; i++) {
      free(ptrs[i]);
    }
  }
}
1731