1 /*
2 * Copyright (c) Huawei Technologies Co., Ltd. 2021-2023. All rights reserved.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15 #include "stack_data_repeater.h"
16 #include "hook_common.h"
17
18 using namespace OHOS::Developtools::NativeDaemon;
19
StackDataRepeater(size_t maxSize)20 StackDataRepeater::StackDataRepeater(size_t maxSize)
21 {
22 maxSize_ = maxSize;
23 closed_ = false;
24 reducedStackCount_ = 0;
25 }
26
// Destructor: closes the repeater so any producer/consumer threads blocked on
// the condition variables are woken and observe closed_ before members die.
StackDataRepeater::~StackDataRepeater()
{
    Close();
}
31
PutRawStackArray(std::array<std::shared_ptr<HookRecord>,CACHE_ARRAY_SIZE> & rawDataArray,uint32_t batchCount)32 bool StackDataRepeater::PutRawStackArray(std::array<std::shared_ptr<HookRecord>, CACHE_ARRAY_SIZE>& rawDataArray,
33 uint32_t batchCount)
34 {
35 std::unique_lock<std::mutex> lock(mutex_);
36 if ((rawDataArray.empty()) && (rawDataQueue_.size() > 0)) {
37 PROFILER_LOG_INFO(LOG_CORE, "no need put nullptr if queue has data, rawDataQueue_.size() = %zu",
38 rawDataQueue_.size());
39 return true;
40 }
41 while (rawDataQueue_.size() >= maxSize_ && !closed_) {
42 slotCondVar_.wait(lock);
43 }
44 if (closed_) {
45 return false;
46 }
47 for (uint32_t i = 0; i < batchCount; i++) {
48 rawDataQueue_.push_back(rawDataArray[i]);
49 }
50 lock.unlock();
51 itemCondVar_.notify_one();
52 return true;
53 }
54
Size()55 size_t StackDataRepeater::Size()
56 {
57 std::unique_lock<std::mutex> lock(mutex_);
58 return rawDataQueue_.size();
59 }
60
Reset()61 void StackDataRepeater::Reset()
62 {
63 std::unique_lock<std::mutex> lock(mutex_);
64 closed_ = false;
65 }
66
Close()67 void StackDataRepeater::Close()
68 {
69 {
70 std::unique_lock<std::mutex> lock(mutex_);
71 rawDataQueue_.clear();
72 closed_ = true;
73 }
74 PROFILER_LOG_INFO(LOG_CORE, "StackDataRepeater Close, reducedStackCount_ : %" PRIx64 " ", reducedStackCount_);
75 slotCondVar_.notify_all();
76 itemCondVar_.notify_all();
77 }
78
// Enqueues one hook record for the consumer thread.
// hookData may be nullptr: a nullptr acts as a wake-up sentinel for the
// consumer and is only enqueued when the queue is empty.
// When isRecordAccurately is false, matched malloc/free pairs are elided:
// a FREE whose address matches a pending MALLOC cancels both records.
// Blocks while the queue is full; returns false if closed while waiting.
bool StackDataRepeater::PutRawStack(const HookRecordPtr& hookData, bool isRecordAccurately)
{
    bool needInsert = true;
    std::unique_lock<std::mutex> lock(mutex_);

    // If the queue already has data the consumer will be woken anyway,
    // so a nullptr sentinel adds nothing — drop it.
    if ((hookData == nullptr) && (rawDataQueue_.size() > 0)) {
        PROFILER_LOG_INFO(LOG_CORE, "no need put nullptr if queue has data, rawDataQueue_.size() = %zu",
            rawDataQueue_.size());
        return true;
    }
    // Back-pressure: wait for a free slot unless the repeater is closing.
    while (rawDataQueue_.size() >= maxSize_ && !closed_) {
        slotCondVar_.wait(lock);
    }
    if (closed_) {
        return false;
    }

    // Fast path (expected): a real record in non-accurate mode, where
    // malloc/free pair elision applies.
    if (__builtin_expect((hookData != nullptr) && !isRecordAccurately, true)) {
        if (hookData->GetType() == FREE_MSG || hookData->GetType() == FREE_MSG_SIMP) {
            auto temp = mallocMap_.find(hookData->GetAddr());
            // true : pair of malloc and free matched, both malloc and free will be ignored
            // false : can not match, send free's data anyway
            if (temp != mallocMap_.end()) {
                // The matched malloc is already in the queue; flag it so the
                // consumer skips it instead of removing it here.
                temp->second->GetRawStack()->reportFlag = false; // will be ignore later
                mallocMap_.erase(hookData->GetAddr());
                needInsert = false;
            }
        } else if (hookData->GetType() == MALLOC_MSG) {
            // Remember pending mallocs by address so a later free can cancel them.
            mallocMap_.insert(std::pair<uint64_t, std::shared_ptr<HookRecord>>(hookData->GetAddr(), hookData));
        }
        if (needInsert) {
            rawDataQueue_.push_back(hookData);
        }
    } else {
        // Accurate mode or nullptr sentinel: enqueue unconditionally.
        rawDataQueue_.push_back(hookData);
    }

    lock.unlock();
    itemCondVar_.notify_one();
    return true;
}
120
TakeRawData(uint32_t during,clockid_t clockId,uint32_t batchCount,HookRecordPtr batchRawStack[],uint32_t statInterval,bool & isTimeOut)121 HookRecordPtr StackDataRepeater::TakeRawData(uint32_t during, clockid_t clockId, uint32_t batchCount,
122 HookRecordPtr batchRawStack[], uint32_t statInterval, bool& isTimeOut)
123 {
124 uint32_t rawDataQueueSize = 0;
125 std::unique_lock<std::mutex> lock(mutex_);
126 if (statInterval > 0 &&
127 !itemCondVar_.wait_for(lock, std::chrono::milliseconds(during),
128 [&] { return ((!rawDataQueue_.empty()) || (closed_)); })) {
129 if (rawDataQueue_.empty() && !closed_) {
130 isTimeOut = true;
131 lock.unlock();
132 slotCondVar_.notify_one();
133 return nullptr;
134 }
135 } else {
136 while (rawDataQueue_.empty() && !closed_) {
137 itemCondVar_.wait(lock);
138 }
139 }
140 if (closed_) {
141 return nullptr;
142 }
143 HookRecordPtr hookData = nullptr;
144 RawStackPtr rawStack = nullptr;
145 rawDataQueueSize = rawDataQueue_.size();
146 int resultSize = rawDataQueueSize > batchCount ? batchCount : rawDataQueueSize;
147 bool needReduceStack = rawDataQueueSize >= SPEED_UP_THRESHOLD;
148 for (int i = 0; i < resultSize; i++) {
149 hookData = rawDataQueue_.front();
150 rawDataQueue_.pop_front();
151 batchRawStack[i] = hookData;
152 rawStack = hookData->GetRawStack();
153 if ((hookData != nullptr) && (hookData->GetType() == MALLOC_MSG)) {
154 mallocMap_.erase(hookData->GetAddr());
155 if (needReduceStack) {
156 rawStack->reduceStackFlag = true;
157 reducedStackCount_++;
158 }
159 }
160 }
161
162 lock.unlock();
163 slotCondVar_.notify_one();
164 return hookData;
165 }