/*
 * Copyright (c) 2023 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "eu/execute_unit.h"

#include <sys/resource.h>
#include "internal_inc/config.h"
#include "util/singleton_register.h"
#include "eu/co_routine_factory.h"
#include "util/ffrt_facade.h"
#include "dfx/sysevent/sysevent.h"

namespace {
const size_t MAX_ESCAPE_WORKER_NUM = 1024;
constexpr uint64_t MAX_ESCAPE_INTERVAL_MS_COUNT = 1000ULL * 100 * 60 * 60 * 24 * 365; // 100 years in milliseconds
constexpr size_t MAX_TID_SIZE = 100;
ffrt::WorkerStatusInfo g_workerStatusInfo[ffrt::QoS::MaxNum()];
ffrt::fast_mutex g_workerStatusMutex[ffrt::QoS::MaxNum()];
}

namespace ffrt {
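// Worker-share configuration: maps a worker's QoS to the list of other QoS levels whose
// tasks it may help execute; the bool marks whether the worker's priority should be
// adjusted when doing so (see WorkerShare below).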
const std::map<QoS, std::vector<std::pair<QoS, bool>>> DEFAULT_WORKER_SHARE_CONFIG = {
    {0, {std::make_pair(1, false), std::make_pair(2, false), std::make_pair(3, false), std::make_pair(4, false)}},
    {1, {std::make_pair(0, false), std::make_pair(2, false), std::make_pair(3, false), std::make_pair(4, false)}},
    {2, {std::make_pair(0, false), std::make_pair(1, false), std::make_pair(3, false), std::make_pair(4, false)}},
    {3, {std::make_pair(0, false), std::make_pair(1, false), std::make_pair(2, false), std::make_pair(4, false)}},
    {4, {std::make_pair(0, false), std::make_pair(1, false), std::make_pair(2, false), std::make_pair(3, false)}},
};
const std::map<QoS, std::vector<std::pair<QoS, bool>>> WORKER_SHARE_CONFIG = {
    {1, {std::make_pair(0, false), std::make_pair(2, false), std::make_pair(3, false), std::make_pair(4, false)}},
    {2, {std::make_pair(0, false), std::make_pair(1, false), std::make_pair(3, false), std::make_pair(4, false)}},
    {3, {std::make_pair(0, false), std::make_pair(1, false), std::make_pair(2, false), std::make_pair(4, false)}},
    {4, {std::make_pair(0, false), std::make_pair(1, false), std::make_pair(2, false), std::make_pair(3, false)}},
    {6, {std::make_pair(5, false)}},
};
const std::set<QoS> DEFAULT_TASK_BACKLOG_CONFIG = {0, 1, 2, 3, 4, 5};
const std::set<QoS> TASK_BACKLOG_CONFIG = {0, 2, 4, 5};

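// Initializes the coroutine stack pool, per-QoS worker limits, dynamic-scaling wakeup
// conditions, escape-timer entries, and the process-specific worker-share/backlog configuration.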
ExecuteUnit::ExecuteUnit()
{
    ffrt::CoRoutineInstance(CoStackAttr::Instance()->size);

    workerGroup[qos_deadline_request].tg = std::make_unique<ThreadGroup>();

    for (auto qos = QoS::Min(); qos < QoS::Max(); ++qos) {
        workerGroup[qos].hardLimit = DEFAULT_HARDLIMIT;
        workerGroup[qos].maxConcurrency = GlobalConfig::Instance().getCpuWorkerNum(qos);
    }
#ifdef FFRT_WORKERS_DYNAMIC_SCALING
    memset_s(&domainInfoMonitor, sizeof(domainInfoMonitor), 0, sizeof(domainInfoMonitor));
    wakeupCond.check_ahead = false;
    wakeupCond.global.low = 0;
    wakeupCond.global.high = 0;
    for (int i = 0; i < BLOCKAWARE_DOMAIN_ID_MAX + 1; i++) {
        wakeupCond.local[i].low = 0;
        if (i < qosMonitorMaxNum) {
            wakeupCond.local[i].high = UINT_MAX;
            wakeupCond.global.low += wakeupCond.local[i].low;
            wakeupCond.global.high = UINT_MAX;
        } else {
            wakeupCond.local[i].high = 0;
        }
    }
#endif
    for (int idx = 0; idx < QoS::MaxNum(); idx++) {
        we_[idx] = new WaitUntilEntry;
        we_[idx]->cb = nullptr;
    }

    if (strstr(GetCurrentProcessName(), "CameraDaemon")) {
        SetWorkerShare(WORKER_SHARE_CONFIG);
        SetTaskBacklog(TASK_BACKLOG_CONFIG);
    } else {
        SetWorkerShare(DEFAULT_WORKER_SHARE_CONFIG);
        SetTaskBacklog(DEFAULT_TASK_BACKLOG_CONFIG);
    }
}

ExecuteUnit::~ExecuteUnit()
{
    // release worker escape event entries
    FFRT_LOGI("Destructor.");
    for (int idx = 0; idx < QoS::MaxNum(); idx++) {
        if (we_[idx] != nullptr) {
            delete we_[idx];
            we_[idx] = nullptr;
        }
    }
}

ExecuteUnit &ExecuteUnit::Instance()
{
    return SingletonRegister<ExecuteUnit>::Instance();
}

void ExecuteUnit::RegistInsCb(SingleInsCB<ExecuteUnit>::Instance &&cb)
{
    SingletonRegister<ExecuteUnit>::RegistInsCb(std::move(cb));
}

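// Binds the worker group of the given QoS to its ThreadGroup. The first caller initializes
// the thread group and joins all existing worker threads to it; later calls only bump the
// reference count.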
ThreadGroup *ExecuteUnit::BindTG(QoS& qos)
{
    auto &tgwrap = workerGroup[qos];
    if (!tgwrap.tg) {
        return nullptr;
    }

    std::lock_guard<std::shared_mutex> lck(tgwrap.tgMutex);

    if (tgwrap.tgRefCount++ > 0) {
        return tgwrap.tg.get();
    }

    if (!(tgwrap.tg->Init())) {
        FFRT_SYSEVENT_LOGE("Init Thread Group Failed");
        return tgwrap.tg.get();
    }

    for (auto &thread : tgwrap.threads) {
        pid_t tid = thread.first->Id();
        if (!(tgwrap.tg->Join(tid))) {
            FFRT_SYSEVENT_LOGE("Failed to Join Thread %d", tid);
        }
    }
    return tgwrap.tg.get();
}

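// Joins all existing workers of the given QoS to the worker group via JoinWG.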
void ExecuteUnit::BindWG(QoS& qos)
{
    auto &tgwrap = workerGroup[qos];
    std::shared_lock<std::shared_mutex> lck(tgwrap.tgMutex);
    for (auto &thread : tgwrap.threads) {
        pid_t tid = thread.first->Id();
        if (!JoinWG(tid, qos)) {
            FFRT_SYSEVENT_LOGE("Failed to Join Thread %d", tid);
        }
    }
}

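// Drops one reference to the ThreadGroup; when the last reference is released, all workers
// (except for qos_user_interactive) leave the group and the group itself is released.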
void ExecuteUnit::UnbindTG(QoS& qos)
{
    auto &tgwrap = workerGroup[qos];
    if (!tgwrap.tg) {
        return;
    }

    std::lock_guard<std::shared_mutex> lck(tgwrap.tgMutex);

    if (tgwrap.tgRefCount == 0) {
        return;
    }

    if (--tgwrap.tgRefCount == 0) {
        if (qos != qos_user_interactive) {
            for (auto &thread : tgwrap.threads) {
                pid_t tid = thread.first->Id();
                if (!(tgwrap.tg->Leave(tid))) {
                    FFRT_SYSEVENT_LOGE("Failed to Leave Thread %d", tid);
                }
            }
        }

        if (!(tgwrap.tg->Release())) {
            FFRT_SYSEVENT_LOGE("Release Thread Group Failed");
        }
    }
}

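// Sets the worker stack size for a QoS level, rounded up to a whole number of pages.
// Only allowed while no worker of that QoS exists yet.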
int ExecuteUnit::SetWorkerStackSize(const QoS &qos, size_t stack_size)
{
    CPUWorkerGroup &group = workerGroup[qos];
    std::lock_guard<std::shared_mutex> lck(group.tgMutex);
    if (!group.threads.empty()) {
        FFRT_SYSEVENT_LOGE("stack size can be set only when there is no worker.");
        return -1;
    }
    int pageSize = getpagesize();
    if (pageSize < 0) {
        FFRT_SYSEVENT_LOGE("Invalid pagesize : %d", pageSize);
        return -1;
    }
    group.workerStackSize = (stack_size - 1 + static_cast<size_t>(pageSize)) & -(static_cast<size_t>(pageSize));
    return 0;
}

void ClampValue(uint64_t& value, uint64_t maxValue)
{
    if (value > maxValue) {
        FFRT_LOGW("Value %llu ms exceeds the maximum allowed, clamping to %llu ms.", value, maxValue);
        value = maxValue;
    }
}

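// Enables the worker escape mechanism with per-stage delay intervals and worker-count
// thresholds. Intervals may not be smaller than the current defaults and are clamped to
// MAX_ESCAPE_INTERVAL_MS_COUNT; returns 0 on success and 1 on invalid or repeated calls.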
int ExecuteUnit::SetEscapeEnable(uint64_t oneStageIntervalMs, uint64_t twoStageIntervalMs,
    uint64_t threeStageIntervalMs, uint64_t oneStageWorkerNum, uint64_t twoStageWorkerNum)
{
    if (escapeConfig.enableEscape_) {
        FFRT_LOGW("Worker escape is enabled, the interface cannot be invoked repeatedly.");
        return 1;
    }

    if (oneStageIntervalMs < escapeConfig.oneStageIntervalMs_ ||
        twoStageIntervalMs < escapeConfig.twoStageIntervalMs_ ||
        threeStageIntervalMs < escapeConfig.threeStageIntervalMs_ || oneStageWorkerNum > twoStageWorkerNum) {
        FFRT_LOGE("Setting failed, each stage interval value [%lu, %lu, %lu] "
                  "cannot be smaller than default value [%lu, %lu, %lu], "
                  "and one-stage worker number [%lu] cannot be larger than two-stage worker number [%lu].",
            oneStageIntervalMs,
            twoStageIntervalMs,
            threeStageIntervalMs,
            escapeConfig.oneStageIntervalMs_,
            escapeConfig.twoStageIntervalMs_,
            escapeConfig.threeStageIntervalMs_,
            oneStageWorkerNum,
            twoStageWorkerNum);
        return 1;
    }

    ClampValue(oneStageIntervalMs, MAX_ESCAPE_INTERVAL_MS_COUNT);
    ClampValue(twoStageIntervalMs, MAX_ESCAPE_INTERVAL_MS_COUNT);
    ClampValue(threeStageIntervalMs, MAX_ESCAPE_INTERVAL_MS_COUNT);

    escapeConfig.enableEscape_ = true;
    escapeConfig.oneStageIntervalMs_ = oneStageIntervalMs;
    escapeConfig.twoStageIntervalMs_ = twoStageIntervalMs;
    escapeConfig.threeStageIntervalMs_ = threeStageIntervalMs;
    escapeConfig.oneStageWorkerNum_ = oneStageWorkerNum;
    escapeConfig.twoStageWorkerNum_ = twoStageWorkerNum;
    FFRT_LOGI("Enable worker escape success, one-stage interval ms %lu, two-stage interval ms %lu, "
              "three-stage interval ms %lu, one-stage worker number %lu, two-stage worker number %lu.",
        escapeConfig.oneStageIntervalMs_,
        escapeConfig.twoStageIntervalMs_,
        escapeConfig.threeStageIntervalMs_,
        escapeConfig.oneStageWorkerNum_,
        escapeConfig.twoStageWorkerNum_);
    return 0;
}

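// Schedules a one-shot delayed escape task for the QoS; skipped when one is already pending.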
void ExecuteUnit::SubmitEscape(int qos, uint64_t totalWorkerNum)
{
    // escape event has been triggered and will not be submitted repeatedly
    if (submittedDelayedTask_[qos]) {
        return;
    }

    we_[qos]->tp = std::chrono::steady_clock::now() + std::chrono::milliseconds(CalEscapeInterval(totalWorkerNum));
    if (we_[qos]->cb == nullptr) {
        we_[qos]->cb = [this, qos](WaitEntry *we) {
            (void)we;
            ExecuteEscape(qos);
            submittedDelayedTask_[qos].store(false, std::memory_order_relaxed);
        };
    }

    if (!DelayedWakeup(we_[qos]->tp, we_[qos], we_[qos]->cb, true)) {
        FFRT_LOGW("Failed to set qos %d escape task.", qos);
        return;
    }

    submittedDelayedTask_[qos].store(true, std::memory_order_relaxed);
}

std::array<std::atomic<sched_mode_type>, QoS::MaxNum()> ExecuteUnit::schedMode{};

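// Creates one CPU worker for the given QoS and records it in the worker group.
// Returns false on an invalid QoS, during teardown, or when worker creation fails.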
bool ExecuteUnit::IncWorker(const QoS &qos)
{
    int workerQos = qos();
    if (workerQos < 0 || workerQos >= QoS::MaxNum()) {
        FFRT_SYSEVENT_LOGE("IncWorker qos:%d is invalid", workerQos);
        return false;
    }
    if (tearDown) {
        FFRT_SYSEVENT_LOGE("CPU Worker Manager exit");
        return false;
    }

    workerNum.fetch_add(1);
    auto worker = CreateCPUWorker(qos);
    auto uniqueWorker = std::unique_ptr<CPUWorker>(worker);
    if (uniqueWorker == nullptr) {
        workerNum.fetch_sub(1);
        FFRT_SYSEVENT_LOGE("IncWorker failed: worker is nullptr\n");
        return false;
    }
    {
        std::lock_guard<std::shared_mutex> lock(workerGroup[workerQos].tgMutex);
        if (uniqueWorker->Exited()) {
            FFRT_SYSEVENT_LOGW("IncWorker failed: worker has exited\n");
            goto create_success;
        }

        auto result = workerGroup[workerQos].threads.emplace(worker, std::move(uniqueWorker));
        if (!result.second) {
            FFRT_SYSEVENT_LOGW("qos:%d worker insert fail:%d", workerQos, result.second);
        }
    }
create_success:
#ifdef FFRT_WORKER_MONITOR
    FFRTFacade::GetWMInstance().SubmitTask();
#endif
    FFRTTraceRecord::UseFfrt();
    return true;
}

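// Unregisters block-aware monitoring (when dynamic scaling is initialized) and clears the
// monitor flag on the worker with the given tid.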
void ExecuteUnit::DisableWorkerMonitor(const QoS& qos, int tid)
{
#ifdef FFRT_WORKERS_DYNAMIC_SCALING
    if (IsBlockAwareInit()) {
        int ret = BlockawareUnregister();
        if (ret != 0) {
            FFRT_SYSEVENT_LOGE("blockaware unregister fail, ret[%d]", ret);
        }
    }
#endif

    ffrt::CPUWorkerGroup& group = workerGroup[qos];
    std::lock_guard<std::shared_mutex> lck(group.tgMutex);
    for (const auto& thread : group.threads) {
        if (thread.first->Id() == tid) {
            thread.first->SetWorkerMonitorStatus(false);
        }
    }
}

void ExecuteUnit::RestoreThreadConfig()
{
    for (auto qos = ffrt::QoS::Min(); qos < ffrt::QoS::Max(); ++qos) {
        ffrt::CPUWorkerGroup &group = workerGroup[qos];
        std::lock_guard<std::shared_mutex> lck(group.tgMutex);
        for (auto &thread : group.threads) {
            thread.first->SetThreadAttr(qos);
        }
    }
}

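// Satisfies a demand for `number` workers of the given QoS: first wakes sleeping workers,
// then creates new ones while total concurrency stays below maxConcurrency.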
void ExecuteUnit::NotifyWorkers(const QoS &qos, int number)
{
    CPUWorkerGroup &group = workerGroup[qos];
    std::lock_guard lg(group.lock);
    int increasableNumber = static_cast<int>(group.maxConcurrency) - (group.executingNum + group.sleepingNum);
    int wakeupNumber = std::min(number, group.sleepingNum);
    for (int idx = 0; idx < wakeupNumber; idx++) {
        WakeupWorkers(qos);
    }

    int incNumber = std::min(number - wakeupNumber, increasableNumber);
    for (int idx = 0; idx < incNumber; idx++) {
        group.executingNum++;
        IncWorker(qos);
    }
    FFRT_LOGD("qos[%d] inc [%d] workers, wakeup [%d] workers", static_cast<int>(qos), incNumber, wakeupNumber);
}

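// Lets a worker try to pick up work from the other QoS levels listed in its share config.
// Only the non-priority-changing entries are handled here; returns true once taskFunction
// succeeds for one of them.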
bool ExecuteUnit::WorkerShare(CPUWorker* worker, std::function<bool(int, CPUWorker*)> taskFunction)
{
    for (const auto& pair : workerGroup[worker->GetQos()].workerShareConfig) {
        int shareQos = pair.first;
        bool isChangePriority = pair.second;
        if (!isChangePriority && taskFunction(shareQos, worker)) {
            return true;
        }
    }
    return false;
}

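// Cleans up a retiring worker: marks it destroyed and exited, detaches the thread, removes it
// from the thread map, leaves the thread group, and unregisters block-aware monitoring.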
void ExecuteUnit::WorkerRetired(CPUWorker *thread)
{
    thread->SetWorkerState(WorkerStatus::DESTROYED);
    pid_t tid = thread->Id();
    int qos = static_cast<int>(thread->GetQos());

    {
        std::lock_guard<std::shared_mutex> lck(workerGroup[qos].tgMutex);
        thread->SetExited();
        thread->Detach();
        auto worker = std::move(workerGroup[qos].threads[thread]);
        int ret = workerGroup[qos].threads.erase(thread);
        if (ret != 1) {
            FFRT_SYSEVENT_LOGE("erase qos[%d] thread failed, %d elements removed", qos, ret);
        }
        WorkerLeaveTg(qos, tid);
#ifdef FFRT_WORKERS_DYNAMIC_SCALING
        if (IsBlockAwareInit()) {
            ret = BlockawareUnregister();
            if (ret != 0) {
                FFRT_SYSEVENT_LOGE("blockaware unregister fail, ret[%d]", ret);
            }
        }
#endif
        worker = nullptr;
        workerNum.fetch_sub(1);
    }
    FFRT_LOGD("to exit, qos[%d], tid[%d]", qos, tid);
}

void ExecuteUnit::WorkerJoinTg(const QoS &qos, pid_t pid)
{
    std::shared_lock<std::shared_mutex> lock(workerGroup[qos()].tgMutex);
    if (qos == qos_user_interactive || qos > qos_max) {
        (void)JoinWG(pid, qos);
        return;
    }
    auto &tgwrap = workerGroup[qos()];
    if (!tgwrap.tg) {
        return;
    }

    if ((tgwrap.tgRefCount) == 0) {
        return;
    }

    tgwrap.tg->Join(pid);
}

void ExecuteUnit::WorkerLeaveTg(const QoS &qos, pid_t pid)
{
    if (qos == qos_user_interactive || qos > qos_max) {
        (void)LeaveWG(pid, qos);
        return;
    }
    auto &tgwrap = workerGroup[qos()];
    if (!tgwrap.tg) {
        return;
    }

    if ((tgwrap.tgRefCount) == 0) {
        return;
    }

    tgwrap.tg->Leave(pid);
}

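// Constructs a CPUWorker for the QoS with the default callback set (idle action, retirement,
// prepare, and block-aware query) and the group's configured stack size.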
CPUWorker *ExecuteUnit::CreateCPUWorker(const QoS &qos)
{
    // default strategy of worker ops
    CpuWorkerOps ops{
        [this](CPUWorker *thread) { return this->WorkerIdleAction(thread); },
        [this](CPUWorker *thread) { this->WorkerRetired(thread); },
        [this](CPUWorker *thread) { this->WorkerPrepare(thread); },
#ifdef FFRT_WORKERS_DYNAMIC_SCALING
        [this]() { return this->IsBlockAwareInit(); },
#endif
    };

    return new (std::nothrow) CPUWorker(qos, std::move(ops), workerGroup[qos].workerStackSize);
}

#ifdef FFRT_WORKERS_DYNAMIC_SCALING
bool ExecuteUnit::IsBlockAwareInit()
{
    return blockAwareInit;
}

BlockawareWakeupCond *ExecuteUnit::WakeupCond(void)
{
    return &wakeupCond;
}

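// Loads a block-aware snapshot and, for every monitored QoS whose running worker count has
// dropped to its low watermark (and whose total workers stay below MAX_ESCAPE_WORKER_NUM),
// raises a TASK_ESCAPED notification.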
void ExecuteUnit::MonitorMain()
{
    (void)WorkerInit();
    int ret = BlockawareLoadSnapshot(keyPtr, &domainInfoMonitor);
    if (ret != 0) {
        FFRT_SYSEVENT_LOGE("blockaware load snapshot fail, ret[%d]", ret);
        return;
    }
    for (int i = 0; i < qosMonitorMaxNum; i++) {
        auto &info = domainInfoMonitor.localinfo[i];
        if (info.nrRunning <= wakeupCond.local[i].low &&
            (info.nrRunning + info.nrBlocked + info.nrSleeping) < MAX_ESCAPE_WORKER_NUM) {
            NotifyTask<TaskNotifyType::TASK_ESCAPED>(i);
        }
    }
    stopMonitor = true;
}
#endif

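// Returns the number of workers of the QoS considered to be running. With dynamic scaling,
// once execution is at capacity the block-aware count of blocked workers is subtracted.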
size_t ExecuteUnit::GetRunningNum(const QoS &qos)
{
    CPUWorkerGroup &group = workerGroup[qos()];
    size_t runningNum = group.executingNum;

#ifdef FFRT_WORKERS_DYNAMIC_SCALING
    /* There is no need to update running num when executingNum < maxConcurrency */
    if (static_cast<size_t>(group.executingNum) >= group.maxConcurrency && blockAwareInit) {
        auto nrBlocked = BlockawareLoadSnapshotNrBlockedFast(keyPtr, qos());
        if (static_cast<unsigned int>(group.executingNum) >= nrBlocked) {
            /* nrRunning may not be updated in a timely manner */
            runningNum = group.executingNum - nrBlocked;
        } else {
            FFRT_SYSEVENT_LOGE(
                "qos [%d] nrBlocked [%u] is larger than executingNum [%d].", qos(), nrBlocked, group.executingNum);
        }
    }
#endif

    return runningNum;
}

void ExecuteUnit::ReportEscapeEvent(int qos, size_t totalNum)
{
#ifdef FFRT_SEND_EVENT
    WorkerEscapeReport(GetCurrentProcessName(), qos, totalNum);
#endif
}

void ExecuteUnit::WorkerStart(int qos)
{
    auto& workerStatusInfo = g_workerStatusInfo[qos];
    std::lock_guard lk(g_workerStatusMutex[qos]);
    workerStatusInfo.startedCnt++;
    auto& tids = workerStatusInfo.startedTids;
    tids.push_back(ExecuteCtx::Cur()->tid);
    if (tids.size() > MAX_TID_SIZE) {
        tids.pop_front();
    }
}

void ExecuteUnit::WorkerExit(int qos)
{
    auto& workerStatusInfo = g_workerStatusInfo[qos];
    std::lock_guard lk(g_workerStatusMutex[qos]);
    workerStatusInfo.exitedCnt++;
    auto& tids = workerStatusInfo.exitedTids;
    tids.push_back(ExecuteCtx::Cur()->tid);
    if (tids.size() > MAX_TID_SIZE) {
        tids.pop_front();
    }
}

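// Returns a copy of the accumulated worker status info for the QoS and resets the counters
// and tid lists under the status mutex.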
WorkerStatusInfo ExecuteUnit::GetWorkerStatusInfoAndReset(int qos)
{
    auto& workerStatusInfo = g_workerStatusInfo[qos];
    WorkerStatusInfo result;
    std::lock_guard<fast_mutex> lock(g_workerStatusMutex[qos]);
    result = workerStatusInfo;
    workerStatusInfo.startedCnt = 0;
    workerStatusInfo.exitedCnt = 0;
    std::deque<pid_t> startedEmptyDeque;
    workerStatusInfo.startedTids.swap(startedEmptyDeque);
    std::deque<pid_t> exitedEmptyDeque;
    workerStatusInfo.exitedTids.swap(exitedEmptyDeque);
    return result;
}
} // namespace ffrt