/*
 * Copyright (c) 2023-2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "libpandabase/taskmanager/task_scheduler.h"
#include "libpandabase/taskmanager/task_queue.h"
#include "libpandabase/taskmanager/task.h"
#include <array>
#include <atomic>
#include <cstdlib>
#include <ctime>
#include <functional>
#include <queue>
#include <thread>
#include <tuple>
#include <gtest/gtest.h>

namespace ark::taskmanager {

constexpr size_t DEFAULT_SEED = 123456U;
constexpr size_t TIMEOUT = 1U;

class TaskSchedulerTest : public testing::TestWithParam<TaskTimeStatsType> {
public:
    static constexpr TaskProperties GC_STATIC_VM_BACKGROUND_PROPERTIES {TaskType::GC, VMType::STATIC_VM,
                                                                        TaskExecutionMode::BACKGROUND};
    static constexpr TaskProperties GC_STATIC_VM_FOREGROUND_PROPERTIES {TaskType::GC, VMType::STATIC_VM,
                                                                        TaskExecutionMode::FOREGROUND};
    static constexpr TaskProperties JIT_STATIC_VM_BACKGROUND_PROPERTIES {TaskType::JIT, VMType::STATIC_VM,
                                                                         TaskExecutionMode::BACKGROUND};
    TaskSchedulerTest()
    {
#ifdef PANDA_NIGHTLY_TEST_ON
        seed_ = std::time(nullptr);
#else
        seed_ = DEFAULT_SEED;
#endif
    }
    ~TaskSchedulerTest() override = default;

    NO_COPY_SEMANTIC(TaskSchedulerTest);
    NO_MOVE_SEMANTIC(TaskSchedulerTest);

    static constexpr size_t THREADED_TASKS_COUNT = 100'000U;

    /// Spawns a thread that adds THREADED_TASKS_COUNT counter-incrementing tasks to the given queue and then
    /// signals completion via AddedSetOfTasks(). The caller owns the returned thread: join and delete it.
    std::thread *CreateTaskProducerThread(TaskQueueInterface *queue, TaskExecutionMode mode)
    {
        return new std::thread(
            [queue, mode](TaskSchedulerTest *test) {
                for (size_t i = 0; i < THREADED_TASKS_COUNT; i++) {
                    queue->AddTask(Task::Create({queue->GetTaskType(), queue->GetVMType(), mode},
                                                [test]() { test->IncrementGlobalCounter(); }));
                }
                test->AddedSetOfTasks();
            },
            this);
    }

    void IncrementGlobalCounter()
    {
        globalCounter_++;
    }

    size_t GetGlobalCounter() const
    {
        return globalCounter_;
    }

    /// Sets the number of task sets (one per producer thread) that WaitAllTask() should wait for.
    void SetTasksSetCount(size_t setCount)
    {
        tasksCount_ = setCount;
    }

    /// Waits until all producer threads have added their sets of tasks to the queues.
    void WaitAllTask()
    {
        os::memory::LockHolder lockHolder(tasksMutex_);
        while (tasksSetAdded_ != tasksCount_) {
            tasksCondVar_.TimedWait(&tasksMutex_, TIMEOUT);
        }
    }

    /// Called by a producer thread after it has added its full set of tasks.
    void AddedSetOfTasks()
    {
        os::memory::LockHolder lockHolder(tasksMutex_);
        tasksSetAdded_++;
        tasksCondVar_.SignalAll();
    }

    size_t GetSeed() const
    {
        return seed_;
    }

private:
    os::memory::Mutex lock_;
    os::memory::ConditionVariable condVar_;
    std::atomic_size_t globalCounter_ = 0U;

    size_t tasksCount_ = 0U;
    std::atomic_size_t tasksSetAdded_ = 0U;
    os::memory::Mutex tasksMutex_;
    os::memory::ConditionVariable tasksCondVar_;

    size_t seed_ = 0;
};

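// Registering a queue succeeds once; a second queue with the same TaskType/VMType pair must be rejected (nullptr).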
TEST_F(TaskSchedulerTest, TaskQueueRegistration)
{
    constexpr size_t THREADS_COUNT = 4U;
    auto *tm = TaskScheduler::Create(THREADS_COUNT);
    constexpr uint8_t QUEUE_PRIORITY = TaskQueueInterface::MAX_PRIORITY;
    TaskQueueInterface *queue = tm->CreateAndRegisterTaskQueue<>(TaskType::GC, VMType::STATIC_VM, QUEUE_PRIORITY);
    EXPECT_NE(queue, nullptr);
    EXPECT_EQ(tm->CreateAndRegisterTaskQueue<>(TaskType::GC, VMType::STATIC_VM, QUEUE_PRIORITY), nullptr);
    tm->UnregisterAndDestroyTaskQueue<>(queue);
    TaskScheduler::Destroy();
}

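// The queue owner fills the GC and JIT queues while the workers run; every task must be executed exactly once,
// so each per-type counter must reach COUNT_OF_TASK after Finalize().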
TEST_F(TaskSchedulerTest, TaskQueuesFillingFromOwner)
{
    srand(GetSeed());
    // Create TaskScheduler
    constexpr size_t THREADS_COUNT = 4U;
    auto *tm = TaskScheduler::Create(THREADS_COUNT);
    // Create and register 2 queues
    constexpr uint8_t QUEUE_PRIORITY = TaskQueueInterface::DEFAULT_PRIORITY;
    TaskQueueInterface *gcQueue = tm->CreateAndRegisterTaskQueue<>(TaskType::GC, VMType::STATIC_VM, QUEUE_PRIORITY);
    TaskQueueInterface *jitQueue = tm->CreateAndRegisterTaskQueue<>(TaskType::JIT, VMType::STATIC_VM, QUEUE_PRIORITY);
    // Initialize tm workers
    tm->Initialize();
    // Fill the queues with tasks that increment the counter for their type.
    constexpr size_t COUNT_OF_TASK = 10U;
    std::array<std::atomic_size_t, 2U> counters = {0U, 0U};
    for (size_t i = 0U; i < COUNT_OF_TASK; i++) {
        gcQueue->AddTask(Task::Create(GC_STATIC_VM_BACKGROUND_PROPERTIES, [&counters]() {
            constexpr size_t GC_COUNTER = 0U;
            // Atomic with relaxed order reason: data race with counters[GC_COUNTER] with no synchronization or
            // ordering constraints
            counters[GC_COUNTER].fetch_add(1U, std::memory_order_relaxed);
        }));
        jitQueue->AddTask(Task::Create(JIT_STATIC_VM_BACKGROUND_PROPERTIES, [&counters]() {
            constexpr size_t JIT_COUNTER = 1U;
            // Atomic with relaxed order reason: data race with counters[JIT_COUNTER] with no synchronization or
            // ordering constraints
            counters[JIT_COUNTER].fetch_add(1U, std::memory_order_relaxed);
        }));
    }
    tm->Finalize();
    for (auto &counter : counters) {
        ASSERT_EQ(counter, COUNT_OF_TASK) << "seed:" << GetSeed();
    }
    tm->UnregisterAndDestroyTaskQueue<>(gcQueue);
    tm->UnregisterAndDestroyTaskQueue<>(jitQueue);
    TaskScheduler::Destroy();
}

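// With a single worker thread, a FOREGROUND task must be executed before BACKGROUND tasks that were added earlier.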
TEST_F(TaskSchedulerTest, ForegroundQueueTest)
{
    srand(GetSeed());
    // Create TaskScheduler
    constexpr size_t THREADS_COUNT = 1U;  // IMPORTANT: only one worker, to observe the effect of the foreground mode
    auto *tm = TaskScheduler::Create(THREADS_COUNT);
    // Create and register a queue
    constexpr uint8_t QUEUE_PRIORITY = TaskQueueInterface::DEFAULT_PRIORITY;
    TaskQueueInterface *gcQueue = tm->CreateAndRegisterTaskQueue<>(TaskType::GC, VMType::STATIC_VM, QUEUE_PRIORITY);

    // Fill the queue with tasks that push their TaskExecutionMode to a global queue.
    std::queue<TaskExecutionMode> globalQueue;
    gcQueue->AddTask(Task::Create({TaskType::GC, VMType::STATIC_VM, TaskExecutionMode::BACKGROUND},
                                  [&globalQueue]() { globalQueue.push(TaskExecutionMode::BACKGROUND); }));
    gcQueue->AddTask(Task::Create({TaskType::GC, VMType::STATIC_VM, TaskExecutionMode::BACKGROUND},
                                  [&globalQueue]() { globalQueue.push(TaskExecutionMode::BACKGROUND); }));
    gcQueue->AddTask(Task::Create({TaskType::GC, VMType::STATIC_VM, TaskExecutionMode::FOREGROUND},
                                  [&globalQueue]() { globalQueue.push(TaskExecutionMode::FOREGROUND); }));
    gcQueue->AddTask(Task::Create({TaskType::GC, VMType::STATIC_VM, TaskExecutionMode::BACKGROUND},
                                  [&globalQueue]() { globalQueue.push(TaskExecutionMode::BACKGROUND); }));
    // Initialize tm workers
    tm->Initialize();
    // Finalize waits until all added tasks have been executed
    tm->Finalize();

    ASSERT_EQ(globalQueue.front(), TaskExecutionMode::FOREGROUND) << "seed:" << GetSeed();
    globalQueue.pop();
    ASSERT_EQ(globalQueue.front(), TaskExecutionMode::BACKGROUND) << "seed:" << GetSeed();
    globalQueue.pop();
    ASSERT_EQ(globalQueue.front(), TaskExecutionMode::BACKGROUND) << "seed:" << GetSeed();
    globalQueue.pop();
    ASSERT_EQ(globalQueue.front(), TaskExecutionMode::BACKGROUND) << "seed:" << GetSeed();
    globalQueue.pop();
    ASSERT_TRUE(globalQueue.empty());
    tm->UnregisterAndDestroyTaskQueue<>(gcQueue);
    TaskScheduler::Destroy();
}

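// Tasks create other tasks: each GC task enqueues a JIT task from a worker thread; both counters must reach
// COUNT_OF_TASK.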
TEST_F(TaskSchedulerTest, TaskCreateTask)
{
    srand(GetSeed());
    // Create TaskScheduler
    constexpr size_t THREADS_COUNT = 4U;
    auto *tm = TaskScheduler::Create(THREADS_COUNT);
    // Create and register 2 queues
    constexpr uint8_t QUEUE_PRIORITY = TaskQueueInterface::DEFAULT_PRIORITY;
    TaskQueueInterface *gcQueue = tm->CreateAndRegisterTaskQueue<>(TaskType::GC, VMType::STATIC_VM, QUEUE_PRIORITY);
    TaskQueueInterface *jitQueue = tm->CreateAndRegisterTaskQueue<>(TaskType::JIT, VMType::STATIC_VM, QUEUE_PRIORITY);

    // Initialize tm workers
    tm->Initialize();
    // Fill queues with tasks that increment the counter for their type; each GC task adds a JIT task from a worker
    // thread.
    std::array<std::atomic_size_t, 2U> counters = {0U, 0U};
    constexpr size_t COUNT_OF_TASK = 10U;
    for (size_t i = 0; i < COUNT_OF_TASK; i++) {
        gcQueue->AddTask(Task::Create(GC_STATIC_VM_BACKGROUND_PROPERTIES, [&counters, &jitQueue]() {
            constexpr size_t GC_COUNTER = 0U;
            // Atomic with relaxed order reason: data race with counters[GC_COUNTER] with no synchronization or
            // ordering constraints
            counters[GC_COUNTER].fetch_add(1U, std::memory_order_relaxed);
            jitQueue->AddTask(
                Task::Create({TaskType::JIT, VMType::STATIC_VM, TaskExecutionMode::BACKGROUND}, [&counters]() {
                    constexpr size_t JIT_COUNTER = 1U;
                    // Atomic with relaxed order reason: data race with counters[JIT_COUNTER] with no synchronization
                    // or ordering constraints
                    counters[JIT_COUNTER].fetch_add(1U, std::memory_order_relaxed);
                }));
        }));
    }
    tm->Finalize();
    for (auto &counter : counters) {
        ASSERT_EQ(counter, COUNT_OF_TASK) << "seed:" << GetSeed();
    }
    tm->UnregisterAndDestroyTaskQueue<>(gcQueue);
    tm->UnregisterAndDestroyTaskQueue<>(jitQueue);
    TaskScheduler::Destroy();
}

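// Several producer threads add tasks concurrently while the workers consume them; the global counter must equal
// the total number of added tasks.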
TEST_F(TaskSchedulerTest, MultithreadingUsage)
{
    srand(GetSeed());
    // Create TaskScheduler
    constexpr size_t THREADS_COUNT = 4U;
    auto *tm = TaskScheduler::Create(THREADS_COUNT);
    // Create three producer threads; each fills one of the registered queues
    constexpr size_t PRODUCER_THREADS_COUNT = 3U;
    SetTasksSetCount(PRODUCER_THREADS_COUNT);
    constexpr uint8_t QUEUE_PRIORITY = TaskQueueInterface::DEFAULT_PRIORITY;
    auto *jitStaticQueue = tm->CreateAndRegisterTaskQueue(TaskType::JIT, VMType::STATIC_VM, QUEUE_PRIORITY);
    auto *gcStaticQueue = tm->CreateAndRegisterTaskQueue(TaskType::GC, VMType::STATIC_VM, QUEUE_PRIORITY);
    auto *gcDynamicQueue = tm->CreateAndRegisterTaskQueue(TaskType::GC, VMType::DYNAMIC_VM, QUEUE_PRIORITY);

    auto jitStaticThread = CreateTaskProducerThread(jitStaticQueue, TaskExecutionMode::BACKGROUND);
    auto gcStaticThread = CreateTaskProducerThread(gcStaticQueue, TaskExecutionMode::BACKGROUND);
    auto gcDynamicThread = CreateTaskProducerThread(gcDynamicQueue, TaskExecutionMode::BACKGROUND);

    tm->Initialize();
    /* Wait until all tasks have been added before calling tm->Finalize() */
    WaitAllTask();
    tm->Finalize();

    ASSERT_EQ(GetGlobalCounter(), THREADED_TASKS_COUNT * PRODUCER_THREADS_COUNT) << "seed:" << GetSeed();

    jitStaticThread->join();
    gcStaticThread->join();
    gcDynamicThread->join();

    delete jitStaticThread;
    delete gcStaticThread;
    delete gcDynamicThread;

    tm->UnregisterAndDestroyTaskQueue(jitStaticQueue);
    tm->UnregisterAndDestroyTaskQueue(gcStaticQueue);
    tm->UnregisterAndDestroyTaskQueue(gcDynamicQueue);
    TaskScheduler::Destroy();
}

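// The caller thread drains the queue itself via HelpWorkersWithTasks(); once the queue is empty the helper must
// report 0 executed tasks.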
TEST_F(TaskSchedulerTest, TaskSchedulerGetTask)
{
    srand(GetSeed());
    // Create TaskScheduler
    constexpr size_t THREADS_COUNT = 1U;  // The worker is not used in this test
    auto *tm = TaskScheduler::Create(THREADS_COUNT);
    constexpr uint8_t QUEUE_PRIORITY = TaskQueueInterface::MAX_PRIORITY;
    auto queue = tm->CreateAndRegisterTaskQueue<>(TaskType::GC, VMType::STATIC_VM, QUEUE_PRIORITY);
    std::queue<TaskType> globalQueue;
    constexpr size_t COUNT_OF_TASKS = 100U;
    for (size_t i = 0U; i < COUNT_OF_TASKS; i++) {
        queue->AddTask(
            Task::Create(GC_STATIC_VM_BACKGROUND_PROPERTIES, [&globalQueue]() { globalQueue.push(TaskType::GC); }));
    }
    for (size_t i = 0U; i < COUNT_OF_TASKS;) {
        i += tm->HelpWorkersWithTasks(GC_STATIC_VM_BACKGROUND_PROPERTIES);
    }
    ASSERT_EQ(tm->HelpWorkersWithTasks(GC_STATIC_VM_BACKGROUND_PROPERTIES), 0U) << "seed:" << GetSeed();
    ASSERT_EQ(globalQueue.size(), COUNT_OF_TASKS) << "seed:" << GetSeed();
    tm->Initialize();
    tm->Finalize();
    ASSERT_EQ(tm->HelpWorkersWithTasks(GC_STATIC_VM_BACKGROUND_PROPERTIES), 0U) << "seed:" << GetSeed();
    tm->UnregisterAndDestroyTaskQueue<>(queue);
    TaskScheduler::Destroy();
}

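// Tasks protect a shared non-atomic counter with a mutex; every increment must be visible after Finalize().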
TEST_F(TaskSchedulerTest, TasksWithMutex)
{
    srand(GetSeed());
    // Create TaskScheduler
    constexpr size_t THREADS_COUNT = 4U;
    auto *tm = TaskScheduler::Create(THREADS_COUNT);
    // Create and register 2 queues
    constexpr uint8_t QUEUE_PRIORITY = TaskQueueInterface::DEFAULT_PRIORITY;
    TaskQueueInterface *gcQueue = tm->CreateAndRegisterTaskQueue<>(TaskType::GC, VMType::STATIC_VM, QUEUE_PRIORITY);
    TaskQueueInterface *jitQueue = tm->CreateAndRegisterTaskQueue<>(TaskType::JIT, VMType::STATIC_VM, QUEUE_PRIORITY);
    // Initialize tm workers
    tm->Initialize();
    // Fill the queues with tasks that increment the counter for their type under a shared mutex.
    constexpr size_t COUNT_OF_TASK = 1000U;
    std::array<size_t, 2U> counters = {0U, 0U};
    os::memory::Mutex mainMutex;
    for (size_t i = 0U; i < COUNT_OF_TASK; i++) {
        gcQueue->AddTask(Task::Create(GC_STATIC_VM_BACKGROUND_PROPERTIES, [&mainMutex, &counters]() {
            constexpr size_t GC_COUNTER = 0U;
            os::memory::LockHolder lockHolder(mainMutex);
            counters[GC_COUNTER]++;
        }));
        jitQueue->AddTask(Task::Create(JIT_STATIC_VM_BACKGROUND_PROPERTIES, [&mainMutex, &counters]() {
            constexpr size_t JIT_COUNTER = 1U;
            os::memory::LockHolder lockHolder(mainMutex);
            counters[JIT_COUNTER]++;
        }));
    }
    tm->Finalize();
    for (auto &counter : counters) {
        ASSERT_EQ(counter, COUNT_OF_TASK) << "seed:" << GetSeed();
    }
    tm->UnregisterAndDestroyTaskQueue<>(gcQueue);
    tm->UnregisterAndDestroyTaskQueue<>(jitQueue);
    TaskScheduler::Destroy();
}

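// Tasks recursively spawn COUNT_OF_REPLICAS child tasks up to MAX_RECURSION_DEPTH; the queue must be fully drained
// by Finalize().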
TEST_F(TaskSchedulerTest, TaskCreateTaskRecursively)
{
    srand(GetSeed());
    // Create TaskScheduler
    constexpr size_t THREADS_COUNT = 4U;
    auto *tm = TaskScheduler::Create(THREADS_COUNT);
    // Create and register a queue
    constexpr uint8_t QUEUE_PRIORITY = TaskQueueInterface::MAX_PRIORITY;
    TaskQueueInterface *gcQueue = tm->CreateAndRegisterTaskQueue<>(TaskType::GC, VMType::STATIC_VM, QUEUE_PRIORITY);

    // Initialize tm workers
    tm->Initialize();
    std::atomic_size_t counter = 0U;
    constexpr size_t COUNT_OF_TASK = 10U;
    constexpr size_t COUNT_OF_REPLICAS = 6U;
    constexpr size_t MAX_RECURSION_DEPTH = 5U;
    std::function<void(size_t)> runner;
    runner = [&counter, &runner, &gcQueue](size_t recursionDepth) {
        if (recursionDepth < MAX_RECURSION_DEPTH) {
            for (size_t j = 0; j < COUNT_OF_REPLICAS; j++) {
                gcQueue->AddTask(Task::Create(GC_STATIC_VM_BACKGROUND_PROPERTIES,
                                              [runner, recursionDepth]() { runner(recursionDepth + 1U); }));
            }
            // Atomic with relaxed order reason: data race with counter with no synchronization or ordering
            // constraints
            counter.fetch_add(1U, std::memory_order_relaxed);
        }
    };
    for (size_t i = 0U; i < COUNT_OF_TASK; i++) {
        gcQueue->AddTask(Task::Create(GC_STATIC_VM_BACKGROUND_PROPERTIES, [runner]() { runner(0U); }));
    }
    tm->Finalize();
    ASSERT_TRUE(gcQueue->IsEmpty());
    tm->UnregisterAndDestroyTaskQueue<>(gcQueue);
    TaskScheduler::Destroy();
}

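// A running task helps the worker threads by calling HelpWorkersWithTasks() from inside its own body.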
TEST_F(TaskSchedulerTest, TaskSchedulerTaskGetTask)
{
    srand(GetSeed());
    // Create TaskScheduler
    constexpr size_t THREADS_COUNT = 4U;
    auto *tm = TaskScheduler::Create(THREADS_COUNT);
    // Create and register a queue
    constexpr uint8_t QUEUE_PRIORITY = TaskQueueInterface::MAX_PRIORITY;
    TaskQueueInterface *gcQueue = tm->CreateAndRegisterTaskQueue<>(TaskType::GC, VMType::STATIC_VM, QUEUE_PRIORITY);

    // Initialize tm workers
    tm->Initialize();
    std::atomic_size_t counter = 0U;
    constexpr size_t COUNT_OF_TASK = 100'000U;
    gcQueue->AddTask(Task::Create(GC_STATIC_VM_BACKGROUND_PROPERTIES, []() {
        size_t executedTasksCount = 0U;
        while (true) {  // Spin until the helper executes at least one task
            executedTasksCount =
                TaskScheduler::GetTaskScheduler()->HelpWorkersWithTasks(GC_STATIC_VM_BACKGROUND_PROPERTIES);
            if (executedTasksCount > 0U) {
                break;
            }
        }
        for (size_t i = 0U; i < COUNT_OF_TASK; i++) {
            executedTasksCount +=
                TaskScheduler::GetTaskScheduler()->HelpWorkersWithTasks(GC_STATIC_VM_BACKGROUND_PROPERTIES);
        }
    }));
    for (size_t i = 0U; i < COUNT_OF_TASK; i++) {
        gcQueue->AddTask(Task::Create(GC_STATIC_VM_BACKGROUND_PROPERTIES, [&counter]() {
            // Atomic with relaxed order reason: data race with counter with no synchronization or ordering
            // constraints
            counter.fetch_add(1U, std::memory_order_relaxed);
        }));
    }
    tm->Finalize();
    ASSERT_TRUE(gcQueue->IsEmpty());
    tm->UnregisterAndDestroyTaskQueue<>(gcQueue);
    TaskScheduler::Destroy();
}

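// WaitForFinishAllTasksWithProperties() must not return while tasks with the given properties are still pending.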
TEST_F(TaskSchedulerTest, TaskSchedulerWaitForFinishAllTaskFromQueue)
{
    srand(GetSeed());
    // Create TaskScheduler
    constexpr size_t THREADS_COUNT = 4U;
    auto *tm = TaskScheduler::Create(THREADS_COUNT);
    // Create and register 2 queues
    constexpr uint8_t QUEUE_PRIORITY = TaskQueueInterface::DEFAULT_PRIORITY;
    TaskQueueInterface *gcQueue = tm->CreateAndRegisterTaskQueue<>(TaskType::GC, VMType::STATIC_VM, QUEUE_PRIORITY);
    TaskQueueInterface *jitQueue = tm->CreateAndRegisterTaskQueue<>(TaskType::JIT, VMType::STATIC_VM, QUEUE_PRIORITY);
    // Fill the queues with tasks that increment the counter for their type and execution mode.
    constexpr size_t COUNT_OF_TASK = 10'000U;
    std::array<std::atomic_size_t, 3U> counters = {0U, 0U, 0U};
    for (size_t i = 0U; i < COUNT_OF_TASK; i++) {
        gcQueue->AddTask(Task::Create(GC_STATIC_VM_BACKGROUND_PROPERTIES, [&counters]() {
            constexpr size_t GC_BACKGROUND_COUNTER = 0U;
            // Atomic with relaxed order reason: data race with counters[GC_BACKGROUND_COUNTER] with no synchronization
            // or ordering constraints
            counters[GC_BACKGROUND_COUNTER].fetch_add(1U, std::memory_order_relaxed);
        }));
        gcQueue->AddTask(Task::Create(GC_STATIC_VM_FOREGROUND_PROPERTIES, [&counters]() {
            constexpr size_t GC_FOREGROUND_COUNTER = 1U;
            // Atomic with relaxed order reason: data race with counters[GC_FOREGROUND_COUNTER] with no synchronization
            // or ordering constraints
            counters[GC_FOREGROUND_COUNTER].fetch_add(1U, std::memory_order_relaxed);
        }));
        jitQueue->AddTask(Task::Create(JIT_STATIC_VM_BACKGROUND_PROPERTIES, [&counters]() {
            constexpr size_t JIT_COUNTER = 2U;
            // Atomic with relaxed order reason: data race with counters[JIT_COUNTER] with no synchronization or
            // ordering constraints
            counters[JIT_COUNTER].fetch_add(1U, std::memory_order_relaxed);
        }));
    }
    // Initialize tm workers
    tm->Initialize();
    tm->WaitForFinishAllTasksWithProperties(GC_STATIC_VM_FOREGROUND_PROPERTIES);
    ASSERT_FALSE(gcQueue->HasTaskWithExecutionMode(TaskExecutionMode::FOREGROUND));
    tm->WaitForFinishAllTasksWithProperties(GC_STATIC_VM_BACKGROUND_PROPERTIES);
    ASSERT_TRUE(gcQueue->IsEmpty());
    tm->WaitForFinishAllTasksWithProperties(JIT_STATIC_VM_BACKGROUND_PROPERTIES);
    ASSERT_TRUE(jitQueue->IsEmpty());
    tm->Finalize();
    for (auto &counter : counters) {
        ASSERT_EQ(counter, COUNT_OF_TASK) << "seed:" << GetSeed();
    }
    tm->UnregisterAndDestroyTaskQueue<>(gcQueue);
    tm->UnregisterAndDestroyTaskQueue<>(jitQueue);
    TaskScheduler::Destroy();
}

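// A task repeatedly re-schedules itself via AddTaskToWaitListWithTimeout(); it must run WAIT_LIST_USAGE_COUNT times
// before Finalize() completes.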
TEST_F(TaskSchedulerTest, TaskSchedulerAddTaskToWaitListWithTimeTest)
{
    srand(GetSeed());
    // Create TaskScheduler
    constexpr size_t THREADS_COUNT = 1U;
    auto *tm = TaskScheduler::Create(THREADS_COUNT);
    // Create and register a queue
    constexpr uint8_t QUEUE_PRIORITY = TaskQueueInterface::DEFAULT_PRIORITY;
    TaskQueueInterface *gcQueue = tm->CreateAndRegisterTaskQueue<>(TaskType::GC, VMType::STATIC_VM, QUEUE_PRIORITY);
    // Initialize tm workers
    tm->Initialize();

    constexpr size_t WAIT_LIST_USAGE_COUNT = 5U;
    std::atomic_size_t sleepCount = 0U;
    std::function<void()> taskRunner = [tm, &sleepCount, &taskRunner]() {
        if (sleepCount < WAIT_LIST_USAGE_COUNT) {
            sleepCount++;
            [[maybe_unused]] auto id = tm->AddTaskToWaitListWithTimeout(
                Task::Create({TaskType::GC, VMType::STATIC_VM, TaskExecutionMode::FOREGROUND}, taskRunner), 1U);
            ASSERT(id != INVALID_WAITER_ID);
        }
    };
    gcQueue->AddTask(Task::Create({TaskType::GC, VMType::STATIC_VM, TaskExecutionMode::FOREGROUND}, taskRunner));
    tm->Finalize();
    ASSERT_EQ(sleepCount, WAIT_LIST_USAGE_COUNT);
    tm->UnregisterAndDestroyTaskQueue<>(gcQueue);
    TaskScheduler::Destroy();
}

} // namespace ark::taskmanager