/*
 * Copyright (c) 2023-2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ecmascript/dfx/hprof/heap_sampling.h"
#include <cmath>  // std::exp

namespace panda::ecmascript {
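// Registers the allocation inspector on every heap space and marks heap sampling as
// started on the JS thread; the root of the sampling tree is named "(root)".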
HeapSampling::HeapSampling(const EcmaVM *vm, Heap *const heap, uint64_t interval, int stackDepth)
    : vm_(vm),
      heap_(heap),
      rate_(interval),
      stackDepth_(stackDepth),
      allocationInspector_(heap_, rate_, this)
{
    samplingInfo_ = std::make_unique<struct SamplingInfo>();
    samplingInfo_->head_.callFrameInfo_.functionName_ = "(root)";
    samplingInfo_->head_.id_ = CreateNodeId();
    heap_->AddAllocationInspectorToAllSpaces(&allocationInspector_);
    vm_->GetJSThread()->SetIsStartHeapSampling(true);
}

HeapSampling::~HeapSampling()
{
    heap_->ClearAllocationInspectorFromAllSpaces();
    vm_->GetJSThread()->SetIsStartHeapSampling(false);
}

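// Finalizes the estimated self sizes of the whole sampling tree and returns the profile.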
const struct SamplingInfo *HeapSampling::GetAllocationProfile()
{
    CalNodeSelfSize(&samplingInfo_->head_);
    return samplingInfo_.get();
}

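// Called by the allocation inspector when a sampled allocation occurs: captures the current
// JS call stack, attributes the allocation to the matching node in the sampling tree, and
// records a sample together with an estimated real allocation count.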
void HeapSampling::ImplementSampling([[maybe_unused]] Address addr, size_t size)
{
    GetStack();
    SamplingNode *node = PushAndGetNode();
    node->allocations_[size]++;
    samplingInfo_->samples_.emplace_back(Sample(size, node->id_, CreateSampleId(), AdjustSampleCount(size, 1)));
}

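// PushStackInfo and PushFrameInfo cap the recorded stack at stackDepth_ frames and report
// whether the frame was accepted.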
bool HeapSampling::PushStackInfo(const struct MethodKey &methodKey)
{
    if (UNLIKELY(frameStack_.size() >= static_cast<size_t>(stackDepth_))) {
        return false;
    }
    frameStack_.emplace_back(methodKey);
    return true;
}

bool HeapSampling::PushFrameInfo(const FrameInfoTemp &frameInfoTemp)
{
    if (UNLIKELY(frameInfoTemps_.size() >= static_cast<size_t>(stackDepth_))) {
        return false;
    }
    frameInfoTemps_.emplace_back(frameInfoTemp);
    return true;
}

void HeapSampling::ResetFrameLength()
{
    frameInfoTemps_.clear();
    frameStack_.clear();
}

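// Walks the current JS frame stack (up to stackDepth_ frames), recording a MethodKey for
// each frame and caching parsed frame info for methods that have not been seen before.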
void HeapSampling::GetStack()
{
    ResetFrameLength();
    JSThread *thread = vm_->GetAssociatedJSThread();
    JSTaggedType *frame = const_cast<JSTaggedType *>(thread->GetCurrentFrame());
    if (frame == nullptr) {
        return;
    }
    if (JsStackGetter::CheckFrameType(thread, frame)) {
        FrameHandler frameHandler(thread);
        FrameIterator it(frameHandler.GetSp(), thread);
        bool topFrame = true;
        int stackCounter = 0;
        for (; !it.Done() && stackCounter < stackDepth_; it.Advance<>()) {
            auto method = it.CheckAndGetMethod();
            if (method == nullptr) {
                continue;
            }
            bool isNative = method->IsNativeWithCallField();
            struct MethodKey methodKey;
            if (topFrame) {
                methodKey.state = JsStackGetter::GetRunningState(it, vm_, isNative, true);
                topFrame = false;
            } else {
                methodKey.state = JsStackGetter::GetRunningState(it, vm_, isNative, false);
            }
            void *methodIdentifier = JsStackGetter::GetMethodIdentifier(method, it);
            if (methodIdentifier == nullptr) {
                continue;
            }
            methodKey.methodIdentifier = methodIdentifier;
            if (stackInfoMap_.count(methodKey) == 0) {
                struct FrameInfoTemp codeEntry;
                if (UNLIKELY(!JsStackGetter::ParseMethodInfo(methodKey, it, vm_, codeEntry))) {
                    continue;
                }
                if (UNLIKELY(!PushFrameInfo(codeEntry))) {
                    return;
                }
            }
            if (UNLIKELY(!PushStackInfo(methodKey))) {
                return;
            }
            ++stackCounter;
        }
        if (!it.Done()) {
            LOG_ECMA(INFO) << "Heap sampling actual stack depth is greater than the set depth: " << stackDepth_;
        }
    }
}

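// Assigns a script id to each newly seen url (ids start from 1) and stores the completed
// CallFrameInfo for every pending frame into stackInfoMap_, then clears the temporaries.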
void HeapSampling::FillScriptIdAndStore()
{
    size_t len = frameInfoTemps_.size();
    if (len == 0) {
        return;
    }
    struct CallFrameInfo callframeInfo;
    for (size_t i = 0; i < len; ++i) {
        callframeInfo.url_ = frameInfoTemps_[i].url;
        auto iter = scriptIdMap_.find(callframeInfo.url_);
        if (iter == scriptIdMap_.end()) {
            scriptIdMap_.emplace(callframeInfo.url_, scriptIdMap_.size() + 1); // scriptId starts from 1
            callframeInfo.scriptId_ = static_cast<int>(scriptIdMap_.size());
        } else {
            callframeInfo.scriptId_ = iter->second;
        }
        callframeInfo.functionName_ = AddRunningState(frameInfoTemps_[i].functionName,
                                                      frameInfoTemps_[i].methodKey.state,
                                                      frameInfoTemps_[i].methodKey.deoptType);
        callframeInfo.codeType_ = frameInfoTemps_[i].codeType;
        callframeInfo.columnNumber_ = frameInfoTemps_[i].columnNumber;
        callframeInfo.lineNumber_ = frameInfoTemps_[i].lineNumber;
        stackInfoMap_.emplace(frameInfoTemps_[i].methodKey, callframeInfo);
    }
    frameInfoTemps_.clear();
}

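// Decorates the function name with a "(BUILTIN)" suffix when the frame ran as a builtin;
// an AOT frame that deoptimized is downgraded to the interpreted (AINT) state first.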
std::string HeapSampling::AddRunningState(char *functionName, RunningState state, kungfu::DeoptType type)
{
    std::string result = functionName;
    if (state == RunningState::AOT && type != kungfu::DeoptType::NONE) {
        state = RunningState::AINT;
    }
    if (state == RunningState::BUILTIN) {
        result.append("(BUILTIN)");
    }
    return result;
}

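// Flushes pending frame info, then descends the sampling tree from the root along the
// captured stack (outermost frame first), creating nodes as needed, and returns the leaf.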
SamplingNode *HeapSampling::PushAndGetNode()
{
    FillScriptIdAndStore();
    SamplingNode *node = &(samplingInfo_->head_);
    int frameLen = static_cast<int>(frameStack_.size()) - 1;
    for (; frameLen >= 0; frameLen--) {
        node = FindOrAddNode(node, frameStack_[frameLen]);
    }
    return node;
}

struct CallFrameInfo HeapSampling::GetMethodInfo(const MethodKey &methodKey)
{
    struct CallFrameInfo frameInfo;
    auto iter = stackInfoMap_.find(methodKey);
    if (iter != stackInfoMap_.end()) {
        frameInfo = iter->second;
    }
    return frameInfo;
}

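// Returns the child of `node` keyed by `methodKey`, creating it (with its call frame info
// and a fresh node id) if it does not exist yet.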
struct SamplingNode *HeapSampling::FindOrAddNode(struct SamplingNode *node, const MethodKey &methodKey)
{
    struct SamplingNode *childNode = nullptr;
    if (node->children_.count(methodKey) != 0) {
        childNode = node->children_[methodKey].get();
    }
    if (childNode == nullptr) {
        std::unique_ptr<struct SamplingNode> tempNode = std::make_unique<struct SamplingNode>();
        tempNode->callFrameInfo_ = GetMethodInfo(methodKey);
        tempNode->id_ = CreateNodeId();
        node->children_.emplace(methodKey, std::move(tempNode));
        return node->children_[methodKey].get();
    }
    return childNode;
}

uint32_t HeapSampling::CreateNodeId()
{
    return ++nodeId_;
}

uint64_t HeapSampling::CreateSampleId()
{
    return ++sampleId_;
}

// We collect samples according to a Poisson process. Because sampling cannot record
// every allocation, we need to estimate the real number of allocations across all spaces
// from the collected samples. Given a sampling rate R, the probability of sampling an
// allocation of size S is 1 - exp(-S/R). So when we collect *count* samples of size *size*,
// we can use this probability to approximate the real count of allocations of that size.
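// For example, with a rate R of 64 KB, an allocation of size S = 64 KB is sampled with
// probability 1 - exp(-1), about 0.63, so one collected sample stands for roughly
// 1 / 0.63, about 1.58, real allocations, which the rounding below turns into 2
// (assuming base::HALF is 0.5, i.e. round-to-nearest).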
unsigned int HeapSampling::AdjustSampleCount(size_t size, unsigned int count) const
{
    double scale = 1.0 / (1.0 - std::exp(-static_cast<double>(size) / rate_));
    return static_cast<unsigned int>(count * scale + base::HALF);
}

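// Recursively converts each node's sampled allocation counts into an estimated self size
// (size * adjusted count, summed over all recorded allocation sizes).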
void HeapSampling::CalNodeSelfSize(SamplingNode *node)
{
    node->selfSize_ = 0;
    for (const auto &alloc : node->allocations_) {
        unsigned int realCount = AdjustSampleCount(alloc.first, alloc.second);
        node->selfSize_ += alloc.first * realCount;
    }
    for (auto &child : node->children_) {
        CalNodeSelfSize(child.second.get());
    }
}
} // namespace panda::ecmascript