/*
 * Copyright (c) 2021 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "thread_pool.h"
#include <cstring>
#include "script_utils.h"

namespace Uscript {
static ThreadPool* g_threadPool = nullptr;
static std::mutex g_initMutex;

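// Create the global thread pool with |number| threads (the calling thread
// counts as thread 0), or return the already-created instance.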
ThreadPool* ThreadPool::CreateThreadPool(int number)
{
    USCRIPT_CHECK(number > 1, return nullptr, "Invalid number %d", number);
    std::lock_guard<std::mutex> lock(g_initMutex);
    if (g_threadPool != nullptr) {
        return g_threadPool;
    }
    g_threadPool = new ThreadPool();
    g_threadPool->Init(number);
    return g_threadPool;
}

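// Destroy the global thread pool, if any, and reset the singleton pointer.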
void ThreadPool::Destroy()
{
    std::lock_guard<std::mutex> lock(g_initMutex);
    if (g_threadPool == nullptr) {
        return;
    }
    delete g_threadPool;
    g_threadPool = nullptr;
}

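// Allocate the fixed-size task queue, give every slot one sub-task flag per
// thread, and spawn threadNumber_ - 1 worker threads (index 0 is the caller).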
void ThreadPool::Init(int32_t numberThread)
{
    threadNumber_ = numberThread;
    taskQueue_.resize(THREAD_POOL_MAX_TASKS);
    for (size_t t = 0; t < taskQueue_.size(); ++t) {
        taskQueue_[t].available = true;
        for (int32_t i = 0; i < threadNumber_; ++i) {
            taskQueue_[t].subTaskFlag.emplace_back(new std::atomic_bool { false });
        }
    }
    // Create workers
    for (int32_t threadIndex = 1; threadIndex < threadNumber_; ++threadIndex) {
        workers_.emplace_back(std::thread(ThreadPool::ThreadExecute, this, threadIndex));
    }
}

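// Worker loop: poll every task slot for a pending sub-task assigned to this
// thread, run it, clear its flag, and yield; exit once the pool is stopped.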
void ThreadPool::ThreadRun(int32_t threadIndex)
{
    while (!stop_) {
        for (int32_t k = 0; k < THREAD_POOL_MAX_TASKS; ++k) {
            if (*taskQueue_[k].subTaskFlag[threadIndex]) {
                taskQueue_[k].task.processor(threadIndex);
                *taskQueue_[k].subTaskFlag[threadIndex] = false;
            }
        }
        std::this_thread::yield();
    }
    printf("ThreadPool::ThreadRun %d exit \n", threadIndex);
}

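// Signal the workers to stop, join them, and free the per-slot sub-task flags.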
ThreadPool::~ThreadPool()
{
    {
        std::lock_guard<std::mutex> lock(queueMutex_);
        stop_ = true;
    }
    for (auto& worker : workers_) {
        worker.join();
    }
    for (auto& task : taskQueue_) {
        for (auto c : task.subTaskFlag) {
            delete c;
        }
    }
}

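// Forward a task to the global thread pool; the task is dropped if no pool
// has been created.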
void ThreadPool::AddTask(Task &&task)
{
    if (g_threadPool != nullptr) {
        g_threadPool->AddNewTask(std::move(task));
    }
}

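// Execute a task. Small or unscheduled tasks run inline on the calling thread;
// otherwise the work items are spread across the worker threads and this call
// blocks until all of them have finished.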
void ThreadPool::AddNewTask(Task &&task)
{
    int32_t index = AcquireWorkIndex();
    USCRIPT_LOGI("ThreadPool::AddNewTask %d ", index);

    // If the task has no parallel work, or no queue slot is free,
    // execute it immediately on the calling thread instead of dispatching it.
    if (task.workSize <= 1 || index < 0) {
        for (int32_t i = 0; i < task.workSize; ++i) {
            task.processor(i);
        }
        if (index >= 0) {
            // The acquired slot was not used; release it.
            std::lock_guard<std::mutex> lock(queueMutex_);
            taskQueue_[index].available = true;
        }
        return;
    }

    int32_t workSize = task.workSize;
    // If there are more work items than threads, wrap the task so that each
    // thread processes the items in strides of threadNumber_.
    if (workSize > threadNumber_) {
        Task newTask;
        newTask.workSize = threadNumber_;
        newTask.processor = [workSize, &task, this](int tId) {
            for (int v = tId; v < workSize; v += threadNumber_) {
                task.processor(v);
            }
        };
        RunTask(std::move(newTask), index);
    } else {
        RunTask(std::move(task), index);
    }

    // Work is done; make this slot available again.
    std::lock_guard<std::mutex> lock(queueMutex_);
    taskQueue_[index].available = true;
}

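// Reserve a free slot in the task queue and return its index, or -1 when
// every slot is in use.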
int32_t ThreadPool::AcquireWorkIndex()
{
    std::lock_guard<std::mutex> lock(queueMutex_);
    for (int32_t i = 0; i < THREAD_POOL_MAX_TASKS; ++i) {
        if (taskQueue_[i].available) {
            taskQueue_[i].available = false;
            return i;
        }
    }
    return -1;
}

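// Publish the task in slot |index|: flag sub-tasks 1..workSize-1 for the
// workers, run sub-task 0 on the calling thread, then spin until every
// sub-task flag has been cleared.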
void ThreadPool::RunTask(Task &&task, int32_t index)
{
    int32_t workSize = task.workSize;
    taskQueue_[index].task = std::move(task);
    // Mark every sub-task as pending so the workers pick them up
    for (int32_t i = 1; i < workSize; ++i) {
        *taskQueue_[index].subTaskFlag[i] = true;
    }

    // Execute the first sub-task on the calling thread
    taskQueue_[index].task.processor(0);
    bool complete = true;
    do {
        std::this_thread::yield();
        complete = true;
        // Check whether all sub-tasks have finished
        for (int32_t i = 1; i < workSize; ++i) {
            if (*taskQueue_[index].subTaskFlag[i]) {
                complete = false;
                break;
            }
        }
    } while (!complete);
}
} // namespace Uscript
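/*
 * Usage sketch (illustrative only, not part of the original source). It
 * assumes thread_pool.h declares CreateThreadPool/AddTask/Destroy as static
 * members and a Task type with the workSize/processor fields used above;
 * DoWorkItem is a placeholder for the caller's per-item function.
 *
 *     Uscript::ThreadPool::CreateThreadPool(4);       // caller + 3 workers
 *     Uscript::Task task;
 *     task.workSize = 8;                              // 8 independent work items
 *     task.processor = [](int i) { DoWorkItem(i); };
 *     Uscript::ThreadPool::AddTask(std::move(task));  // blocks until all items finish
 *     Uscript::ThreadPool::Destroy();
 */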