/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2021-2023. All rights reserved.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "stack_data_repeater.h"
#include "hook_common.h"

using namespace OHOS::Developtools::NativeDaemon;

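// StackDataRepeater is a bounded producer/consumer queue for raw stack samples:
// producers push RawStack records, a consumer drains them in batches, and a small
// object cache lets RawStack buffers be reused instead of reallocated per sample.

// Pre-fill the reuse cache with CACHE_DATA_SIZE RawStack objects.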
StackDataRepeater::StackDataRepeater(size_t maxSize)
{
    maxSize_ = maxSize;
    closed_ = false;
    reducedStackCount_ = 0;
    for (int index = 0; index < CACHE_DATA_SIZE; ++index) {
        rawDataCacheQueue_.emplace_back(std::make_shared<RawStack>());
    }
}

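// Close on destruction so any threads blocked on the queue are released.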
StackDataRepeater::~StackDataRepeater()
{
    Close();
}

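// Hand out a RawStack from the reuse cache, or allocate a fresh one if the cache is empty.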
RawStackPtr StackDataRepeater::GetRawStack()
{
    std::unique_lock<std::mutex> lock(cacheMutex_);
    if (!rawDataCacheQueue_.empty()) {
        RawStackPtr rawStack = rawDataCacheQueue_.back();
        rawDataCacheQueue_.pop_back();
        return rawStack;
    }
    return std::make_shared<RawStack>();
}

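// Give a RawStack back to the reuse cache (reset first) once it is no longer needed;
// anything beyond the cache capacity is simply dropped and freed by the shared_ptr.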
void StackDataRepeater::ReturnRawStack(RawStackPtr rawStack)
{
    std::unique_lock<std::mutex> lock(cacheMutex_);
    if (rawDataCacheQueue_.size() <= CACHE_DATA_SIZE) {
        rawStack->Reset();
        rawDataCacheQueue_.push_back(rawStack);
    }
}

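// Enqueue a batch of raw stacks, blocking while the queue is full; returns false if the
// repeater was closed while waiting.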
bool StackDataRepeater::PutRawStackArray(std::array<std::shared_ptr<RawStack>, CACHE_ARRAY_SIZE>& rawDataArray,
                                         uint32_t batchCount)
{
    std::unique_lock<std::mutex> lock(mutex_);
    // an empty batch does not need to be queued while there is still data pending
    if ((batchCount == 0) && (rawDataQueue_.size() > 0)) {
        PROFILER_LOG_INFO(LOG_CORE, "no need to put an empty batch if queue has data, rawDataQueue_.size() = %zu",
                          rawDataQueue_.size());
        return true;
    }
    while (rawDataQueue_.size() >= maxSize_ && !closed_) {
        slotCondVar_.wait(lock);
    }
    if (closed_) {
        return false;
    }
    for (uint32_t i = 0; i < batchCount; i++) {
        rawDataQueue_.push_back(rawDataArray[i]);
    }
    lock.unlock();
    itemCondVar_.notify_one();
    return true;
}

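// Number of raw stacks currently queued.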
size_t StackDataRepeater::Size()
{
    std::unique_lock<std::mutex> lock(mutex_);
    return rawDataQueue_.size();
}

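// Reopen the repeater after Close() so producers and consumers can use it again.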
void StackDataRepeater::Reset()
{
    std::unique_lock<std::mutex> lock(mutex_);
    closed_ = false;
}

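// Drop all queued data, mark the repeater closed and wake every blocked producer and consumer.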
void StackDataRepeater::Close()
{
    {
        std::unique_lock<std::mutex> lock(mutex_);
        rawDataQueue_.clear();
        closed_ = true;
    }
    PROFILER_LOG_INFO(LOG_CORE, "StackDataRepeater Close, reducedStackCount_ : %" PRIx64 " ", reducedStackCount_);
    slotCondVar_.notify_all();
    itemCondVar_.notify_all();
}

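// Enqueue a single raw stack, blocking while the queue is full. When accurate recording is
// off, a free that matches a pending malloc cancels that malloc instead of being queued,
// so neither event is reported.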
bool StackDataRepeater::PutRawStack(const RawStackPtr& rawData, bool isRecordAccurately)
{
    bool needInsert = true;
    std::unique_lock<std::mutex> lock(mutex_);

    if ((rawData == nullptr) && (rawDataQueue_.size() > 0)) {
        PROFILER_LOG_INFO(LOG_CORE, "no need to put nullptr if queue has data, rawDataQueue_.size() = %zu",
                          rawDataQueue_.size());
        return true;
    }
    while (rawDataQueue_.size() >= maxSize_ && !closed_) {
        slotCondVar_.wait(lock);
    }
    if (closed_) {
        return false;
    }

    if (__builtin_expect((rawData != nullptr) && !isRecordAccurately, true)) {
        if (rawData->stackConext->type == FREE_MSG) {
            auto temp = mallocMap_.find(rawData->stackConext->addr);
            // matched  : the malloc and this free cancel out, so both records are dropped
            // unmatched: the free cannot be paired, so its data is sent anyway
            if (temp != mallocMap_.end()) {
                temp->second->reportFlag = false; // the matching malloc will be ignored later
                mallocMap_.erase(rawData->stackConext->addr);
                needInsert = false;
            }
        } else if (rawData->stackConext->type == MALLOC_MSG) {
            mallocMap_.insert(std::pair<void*, std::shared_ptr<RawStack>>(rawData->stackConext->addr, rawData));
        }
        if (needInsert) {
            rawDataQueue_.push_back(rawData);
        }
    } else {
        rawDataQueue_.push_back(rawData);
    }

    lock.unlock();
    itemCondVar_.notify_one();
    return true;
}

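// Dequeue up to batchCount raw stacks into batchRawStack. With a statistics interval set,
// waits at most `during` milliseconds and reports a timeout instead of blocking forever;
// under heavy backlog (>= SPEED_UP_THRESHOLD) malloc records are flagged for reduced
// stack processing.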
RawStackPtr StackDataRepeater::TakeRawData(uint32_t during, clockid_t clockId, uint32_t batchCount,
                                           RawStackPtr batchRawStack[], uint32_t statInterval, bool& isTimeOut)
{
    uint32_t rawDataQueueSize = 0;
    std::unique_lock<std::mutex> lock(mutex_);
    if (statInterval > 0 &&
        !itemCondVar_.wait_for(lock, std::chrono::milliseconds(during), [&] { return !rawDataQueue_.empty(); })) {
        if (rawDataQueue_.empty() && !closed_) {
            isTimeOut = true;
            lock.unlock();
            slotCondVar_.notify_one();
            return nullptr;
        }
    } else {
        while (rawDataQueue_.empty() && !closed_) {
            itemCondVar_.wait(lock);
        }
    }
    if (closed_) {
        return nullptr;
    }
    RawStackPtr result = nullptr;
    rawDataQueueSize = rawDataQueue_.size();
    int resultSize = rawDataQueueSize > batchCount ? batchCount : rawDataQueueSize;
    bool needReduceStack = rawDataQueueSize >= SPEED_UP_THRESHOLD;
    for (int i = 0; i < resultSize; i++) {
        result = rawDataQueue_.front();
        rawDataQueue_.pop_front();
        batchRawStack[i] = result;
        if ((result != nullptr) && (result->stackConext != nullptr) && (result->stackConext->type == MALLOC_MSG)) {
            mallocMap_.erase(result->stackConext->addr);
            if (needReduceStack) {
                result->reduceStackFlag = true;
                reducedStackCount_++;
            }
        }
    }

    lock.unlock();
    slotCondVar_.notify_one();
    return result;
}