
Searched refs:threads (Results 1 – 20 of 20) sorted by relevance

/bionic/tests/
threads_test.cpp:31 #if __has_include(<threads.h>)
67 TEST(threads, call_once) { in TEST() argument
81 TEST(threads, cnd_broadcast__cnd_wait) { in TEST() argument
117 TEST(threads, cnd_init__cnd_destroy) { in TEST() argument
127 TEST(threads, cnd_signal__cnd_wait) { in TEST() argument
177 TEST(threads, cnd_timedwait_timedout) { in TEST() argument
193 TEST(threads, cnd_timedwait) { in TEST() argument
223 TEST(threads, mtx_init) { in TEST() argument
237 TEST(threads, mtx_destroy) { in TEST() argument
247 TEST(threads, mtx_lock_plain) { in TEST() argument
[all …]
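
These hits exercise bionic's C11 <threads.h> support. A minimal sketch of the mtx/cnd shape the tests cover, assuming <threads.h> is available (bionic implements it over pthreads); the helper names here are illustrative, not taken from the test file:

    #include <threads.h>

    static mtx_t m;
    static cnd_t c;
    static bool ready = false;

    static int waiter(void*) {
      mtx_lock(&m);
      while (!ready) cnd_wait(&c, &m);  // atomically unlocks m, re-locks on wake
      mtx_unlock(&m);
      return 0;
    }

    int main() {
      if (mtx_init(&m, mtx_plain) != thrd_success) return 1;
      if (cnd_init(&c) != thrd_success) return 1;
      thrd_t t;
      thrd_create(&t, waiter, nullptr);
      mtx_lock(&m);
      ready = true;
      cnd_signal(&c);                   // cnd_broadcast would wake every waiter
      mtx_unlock(&m);
      thrd_join(t, nullptr);
      cnd_destroy(&c);
      mtx_destroy(&m);
    }
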
malloc_stress_test.cpp:51 std::vector<std::thread*> threads; in TEST() local
53 threads.push_back(new std::thread([]() { in TEST()
65 for (auto thread : threads) { in TEST()
69 threads.clear(); in TEST()
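
The stress test owns its threads through raw pointers; in isolation, the push/join/delete/clear sequence above looks roughly like this (a sketch, with the per-thread body elided):

    #include <thread>
    #include <vector>

    int main() {
      std::vector<std::thread*> threads;
      for (int i = 0; i < 256; ++i) {
        threads.push_back(new std::thread([] {
          // per-iteration allocation work lives here in the real test
        }));
      }
      for (auto* thread : threads) {
        thread->join();   // join before delete, or ~thread() calls std::terminate
        delete thread;
      }
      threads.clear();    // ready for the next round of the stress loop
    }

A std::vector<std::thread> with emplace_back (as Driver.cpp below does) would avoid the manual delete; raw pointers are a style choice here, not a requirement.
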
leak_test.cpp:141 struct thread_data { pthread_barrier_t* barrier; pid_t* tid; } threads[thread_count]; in TEST() local
149 threads[i] = {&barrier, &tids[i]}; in TEST()
157 ASSERT_EQ(0, pthread_create(&thread, nullptr, thread_function, &threads[i])); in TEST()
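
The shape here is a per-thread argument array plus a barrier so the test can wait until every thread has reported its tid. A minimal sketch under that reading (thread_function's body and the real test's inspection steps are elided; gettid() is bionic and glibc >= 2.30):

    #include <pthread.h>
    #include <unistd.h>

    constexpr int thread_count = 8;

    struct thread_data { pthread_barrier_t* barrier; pid_t* tid; };

    void* thread_function(void* arg) {
      auto* data = static_cast<thread_data*>(arg);
      *data->tid = gettid();                // record this thread's kernel tid
      pthread_barrier_wait(data->barrier);  // check in, then let the test proceed
      return nullptr;
    }

    int main() {
      pthread_barrier_t barrier;
      pthread_barrier_init(&barrier, nullptr, thread_count + 1);  // +1: main waits too
      pid_t tids[thread_count];
      thread_data threads[thread_count];
      for (int i = 0; i < thread_count; ++i) {
        threads[i] = {&barrier, &tids[i]};
        pthread_t thread;
        pthread_create(&thread, nullptr, thread_function, &threads[i]);
      }
      pthread_barrier_wait(&barrier);  // returns once all tids are recorded
    }
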
setjmp_test.cpp:335 pthread_t threads[kNumThreads]; in TEST() local
339 ASSERT_EQ(0, pthread_create(&threads[i], nullptr, jumper, &var)); in TEST()
340 tids[i] = pthread_gettid_np(threads[i]); in TEST()
361 pthread_join(threads[i], nullptr); in TEST()
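
pthread_gettid_np is a bionic extension that maps a pthread_t to its kernel tid while the thread is alive. The create/record/join shape above, sketched with a stand-in for the test's jumper():

    #include <pthread.h>
    #include <sys/types.h>

    constexpr size_t kNumThreads = 4;

    void* worker(void* arg) { return arg; }  // stands in for the test's jumper()

    int main() {
      pthread_t threads[kNumThreads];
      pid_t tids[kNumThreads];
      int var = 0;
      for (size_t i = 0; i < kNumThreads; ++i) {
        pthread_create(&threads[i], nullptr, worker, &var);
        tids[i] = pthread_gettid_np(threads[i]);  // bionic-only; not POSIX
      }
      for (size_t i = 0; i < kNumThreads; ++i) {
        pthread_join(threads[i], nullptr);
      }
    }
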
ifaddrs_test.cpp:277 std::vector<std::thread*> threads; in TEST() local
279 threads.push_back(new std::thread([]() { in TEST()
285 for (auto& t : threads) { in TEST()
malloc_test.cpp:1078 std::vector<std::thread*> threads; in TEST() local
1089 threads.push_back(t); in TEST()
1112 for (auto thread : threads) { in TEST()
1355 std::unique_ptr<std::thread> threads[kNumThreads]; in SetAllocationLimitMultipleThreads() local
1357 threads[i].reset(new std::thread([&num_running, &start_running, &num_successful] { in SetAllocationLimitMultipleThreads()
1384 threads[i]->join(); in SetAllocationLimitMultipleThreads()
pthread_test.cpp:2809 std::vector<pthread_t> threads(data.thread_count); in TEST() local
2810 std::vector<BarrierTestHelperArg> args(threads.size()); in TEST()
2811 for (size_t i = 0; i < threads.size(); ++i) { in TEST()
2814 ASSERT_EQ(0, pthread_create(&threads[i], nullptr, in TEST()
2817 for (size_t i = 0; i < threads.size(); ++i) { in TEST()
2818 ASSERT_EQ(0, pthread_join(threads[i], nullptr)); in TEST()
2880 std::vector<pthread_t> threads(THREAD_COUNT); in TEST() local
2887 ASSERT_EQ(0, pthread_create(&threads[i], nullptr, in TEST()
2892 ASSERT_EQ(0, pthread_join(threads[i], nullptr)); in TEST()
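
One detail worth copying from these hits: the args vector is sized to threads.size() before any pthread_create, so &args[i] stays valid for the lifetime of the threads. Pushing into the vector after threads start could reallocate and leave them holding dangling pointers. A sketch (Arg stands in for BarrierTestHelperArg):

    #include <pthread.h>
    #include <vector>

    struct Arg { int id; };  // stands in for BarrierTestHelperArg

    void* helper(void* p) { return p; }

    int main() {
      size_t thread_count = 8;
      std::vector<pthread_t> threads(thread_count);
      std::vector<Arg> args(threads.size());  // sized once; &args[i] never moves
      for (size_t i = 0; i < threads.size(); ++i) {
        args[i].id = static_cast<int>(i);
        pthread_create(&threads[i], nullptr, helper, &args[i]);
      }
      for (size_t i = 0; i < threads.size(); ++i) {
        pthread_join(threads[i], nullptr);
      }
    }
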
/bionic/benchmarks/
malloc_benchmark.cpp:85 std::thread* threads[kMaxThreads]; in RunThreadsThroughput() local
139 for (size_t i = 0; i < num_threads; ++i) threads[i] = new std::thread(thread_task, i); in RunThreadsThroughput()
151 threads[i]->join(); in RunThreadsThroughput()
152 delete threads[i]; in RunThreadsThroughput()
malloc_rss_benchmark.cpp:105 std::thread* threads[kMaxThreads]; in StressSizeClass() local
106 for (size_t i = 0; i < numThreads; ++i) threads[i] = new std::thread(ThreadTask, i, allocSize); in StressSizeClass()
109 threads[i]->join(); in StressSizeClass()
110 delete threads[i]; in StressSizeClass()
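
Both benchmarks cap concurrency with a fixed array of thread pointers and pass per-thread parameters through std::thread's variadic constructor. A sketch with an illustrative thread_task:

    #include <cstddef>
    #include <thread>

    constexpr size_t kMaxThreads = 8;

    void thread_task(size_t id, size_t alloc_size) {
      // the benchmark's allocate/free loop goes here
      (void)id; (void)alloc_size;
    }

    void run(size_t num_threads, size_t alloc_size) {
      std::thread* threads[kMaxThreads];  // caller must keep num_threads <= kMaxThreads
      for (size_t i = 0; i < num_threads; ++i)
        threads[i] = new std::thread(thread_task, i, alloc_size);  // args copied into the thread
      for (size_t i = 0; i < num_threads; ++i) {
        threads[i]->join();
        delete threads[i];
      }
    }

    int main() { run(4, 64); }
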
/bionic/libc/async_safe/
README.md:6 it among threads, whereas these functions connect to liblog for each log message. While it's
10 threads. Therefore, we maintain these two separate mechanisms.
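
For context, the functions this README describes are used roughly like this inside libc (a sketch; <async_safe/log.h> is a platform-internal bionic header, and the log-priority constant is assumed to be visible through it):

    #include <async_safe/log.h>

    void report_bad_fd(int fd) {
      // No heap allocation and no shared socket state: safe from signal
      // handlers and from code running inside malloc itself.
      async_safe_format_log(ANDROID_LOG_ERROR, "libc",
                            "invalid file descriptor %d", fd);
    }
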
/bionic/tools/versioner/src/
Driver.cpp:214 std::vector<std::thread> threads; in initializeTargetCC1FlagCache() local
216 threads.emplace_back([type, &vfs, &reqs]() { in initializeTargetCC1FlagCache()
226 for (auto& thread : threads) { in initializeTargetCC1FlagCache()
versioner.cpp:194 std::vector<std::thread> threads; in compileHeaders() local
241 threads.emplace_back([&jobs, &job_index, &result, vfs]() { in compileHeaders()
255 for (auto& thread : threads) { in compileHeaders()
258 threads.clear(); in compileHeaders()
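
compileHeaders fans a job list out to a small pool in which each thread claims the next job by index. One common way to write that claim is an atomic fetch_add, sketched below (the real code may coordinate differently):

    #include <atomic>
    #include <thread>
    #include <vector>

    void compile(int job) { /* per-header work */ }

    int main() {
      std::vector<int> jobs(100);
      std::atomic<size_t> job_index{0};
      std::vector<std::thread> threads;
      for (int i = 0; i < 8; ++i) {
        threads.emplace_back([&jobs, &job_index] {
          while (true) {
            size_t idx = job_index.fetch_add(1);  // claim the next job
            if (idx >= jobs.size()) return;       // queue drained
            compile(jobs[idx]);
          }
        });
      }
      for (auto& thread : threads) thread.join();
      threads.clear();  // lets the vector be reused for another round
    }
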
/bionic/tests/headers/posix/
threads_h.c:29 #if __has_include(<threads.h>)
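
The guard in this hit compiles the header test only where <threads.h> exists; the same pattern works for any optional header:

    #if __has_include(<threads.h>)
    #include <threads.h>
    #define HAVE_C11_THREADS 1
    #else
    #define HAVE_C11_THREADS 0
    #endif
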
/bionic/docs/
fdsan.md:14 For example, given two threads running the following code:
113 std::vector<std::thread> threads;
115 threads.emplace_back(function);
117 for (auto& thread : threads) {
123 When running the program, the threads' executions will be interleaved as follows:
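
The bug class fdsan.md describes has this shape (a sketch, not the doc's exact listing): thread one double-closes, and if thread two's open() reuses the descriptor number in the window between the two closes, thread one silently closes thread two's file.

    #include <fcntl.h>
    #include <unistd.h>
    #include <thread>

    void thread_one() {
      int fd = open("/dev/null", O_RDONLY);
      close(fd);
      close(fd);  // double close: may hit a descriptor thread_two just opened
    }

    void thread_two() {
      int fd = open("/dev/null", O_RDONLY);
      // ... operations here can fail with EBADF, or act on the wrong file ...
      close(fd);
    }

    int main() {
      std::thread t1(thread_one), t2(thread_two);
      t1.join();
      t2.join();
    }
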
native_allocator.md:33 This function, when called, should pause all threads that are making a
35 is made to `malloc_enable`, the paused threads should start running again.
318 mechanism will simulate this by creating threads and replaying the operations
321 in all threads since it collapses all of the allocation operations to occur
322 one after another. This will cause a lot of threads allocating at the same
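
The malloc_disable/malloc_enable contract described above (pause every thread entering the allocator, resume them later) is what makes a bracket like the following safe. A sketch of caller-side usage of these bionic extensions, declared in bionic's <malloc.h>; the helper name is hypothetical:

    #include <malloc.h>   // bionic: malloc_disable()/malloc_enable()
    #include <unistd.h>

    pid_t fork_with_quiescent_heap() {  // hypothetical helper
      malloc_disable();    // blocks any thread that enters malloc/free
      pid_t pid = fork();  // the child's heap is not mid-mutation
      malloc_enable();     // runs in both parent and child; threads resume
      return pid;
    }
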
EINTR.md:48 to interrupt another thread (in fact, that's how interruption of threads
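
EINTR.md's point here: a directed signal is how one thread interrupts another's blocking syscall. A minimal sketch, using SIGUSR1 with a handler installed without SA_RESTART so the syscall fails with EINTR instead of restarting:

    #include <cerrno>
    #include <csignal>
    #include <cstdio>
    #include <pthread.h>
    #include <unistd.h>

    void on_signal(int) {}  // exists only so the blocked syscall returns EINTR

    void* blocker(void*) {
      char buf[1];
      ssize_t rc = read(STDIN_FILENO, buf, 1);  // blocks until signalled
      if (rc == -1 && errno == EINTR) std::printf("read interrupted\n");
      return nullptr;
    }

    int main() {
      struct sigaction sa = {};
      sa.sa_handler = on_signal;      // deliberately no SA_RESTART
      sigaction(SIGUSR1, &sa, nullptr);
      pthread_t t;
      pthread_create(&t, nullptr, blocker, nullptr);
      sleep(1);                       // crude: let the thread block first
      pthread_kill(t, SIGUSR1);       // interrupt just that thread
      pthread_join(t, nullptr);
    }
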
elf-tls.md:221 `dlopen` can initialize the new static TLS memory in all existing threads. A thread list could be
455 `thread_local` | - C11: a macro for `_Thread_local` via `threads.h`<br/> - C++11: a keyword, allo…
562 On the other hand, maybe lazy allocation is a feature, because not all threads will use a dlopen'ed
567 > up the process. It would be a waste of memory and time to allocate the storage for all threads. A
570 > alternative to stopping all threads and allocating storage for all threads before letting them run
status.md:111 * Full C11 `<threads.h>` (available as inlines for older API levels).
/bionic/libc/malloc_debug/tests/
malloc_debug_unit_tests.cpp:1100 std::vector<std::thread*> threads(1000); in TEST_F() local
1101 for (size_t i = 0; i < threads.size(); i++) { in TEST_F()
1102 threads[i] = new std::thread([](){ in TEST_F()
1110 for (size_t i = 0; i < threads.size(); i++) { in TEST_F()
1111 threads[i]->join(); in TEST_F()
1112 delete threads[i]; in TEST_F()
/bionic/libc/
Android.bp:979 "bionic/threads.cpp",