// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/profiler/profile-generator.h"

#include <algorithm>

#include "include/v8-profiler.h"
#include "src/base/lazy-instance.h"
#include "src/codegen/source-position.h"
#include "src/objects/shared-function-info-inl.h"
#include "src/profiler/cpu-profiler.h"
#include "src/profiler/profile-generator-inl.h"
#include "src/profiler/profiler-stats.h"
#include "src/tracing/trace-event.h"
#include "src/tracing/traced-value.h"

namespace v8 {
namespace internal {

void SourcePositionTable::SetPosition(int pc_offset, int line,
                                      int inlining_id) {
  DCHECK_GE(pc_offset, 0);
  DCHECK_GT(line, 0);  // The 1-based number of the source line.
  // It's possible that we map multiple source positions to a pc_offset in
  // optimized code. Usually these map to the same line, so there is no
  // difference here as we only store line number and not line/col in the form
  // of a script offset. Ignore any subsequent sets to the same offset.
  if (!pc_offsets_to_lines_.empty() &&
      pc_offsets_to_lines_.back().pc_offset == pc_offset) {
    return;
  }
  // Check that we are inserting in ascending order, so that the vector remains
  // sorted.
  DCHECK(pc_offsets_to_lines_.empty() ||
         pc_offsets_to_lines_.back().pc_offset < pc_offset);
  if (pc_offsets_to_lines_.empty() ||
      pc_offsets_to_lines_.back().line_number != line ||
      pc_offsets_to_lines_.back().inlining_id != inlining_id) {
    pc_offsets_to_lines_.push_back({pc_offset, line, inlining_id});
  }
}

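// The lookups below binary-search the sorted table (see SetPosition) and then
// step back one entry, so that a pc_offset falling between two recorded
// offsets resolves to the source position that starts before it.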
int SourcePositionTable::GetSourceLineNumber(int pc_offset) const {
  if (pc_offsets_to_lines_.empty()) {
    return v8::CpuProfileNode::kNoLineNumberInfo;
  }
  auto it = std::lower_bound(
      pc_offsets_to_lines_.begin(), pc_offsets_to_lines_.end(),
      SourcePositionTuple{pc_offset, 0, SourcePosition::kNotInlined});
  if (it != pc_offsets_to_lines_.begin()) --it;
  return it->line_number;
}

int SourcePositionTable::GetInliningId(int pc_offset) const {
  if (pc_offsets_to_lines_.empty()) {
    return SourcePosition::kNotInlined;
  }
  auto it = std::lower_bound(
      pc_offsets_to_lines_.begin(), pc_offsets_to_lines_.end(),
      SourcePositionTuple{pc_offset, 0, SourcePosition::kNotInlined});
  if (it != pc_offsets_to_lines_.begin()) --it;
  return it->inlining_id;
}

size_t SourcePositionTable::Size() const {
  return sizeof(*this) + pc_offsets_to_lines_.capacity() *
                             sizeof(decltype(pc_offsets_to_lines_)::value_type);
}

void SourcePositionTable::print() const {
  base::OS::Print(" - source position table at %p\n", this);
  for (const SourcePositionTuple& pos_info : pc_offsets_to_lines_) {
    base::OS::Print(" %d --> line_number: %d inlining_id: %d\n",
                    pos_info.pc_offset, pos_info.line_number,
                    pos_info.inlining_id);
  }
}

const char* const CodeEntry::kEmptyResourceName = "";
const char* const CodeEntry::kEmptyBailoutReason = "";
const char* const CodeEntry::kNoDeoptReason = "";

const char* const CodeEntry::kProgramEntryName = "(program)";
const char* const CodeEntry::kIdleEntryName = "(idle)";
const char* const CodeEntry::kGarbageCollectorEntryName = "(garbage collector)";
const char* const CodeEntry::kUnresolvedFunctionName = "(unresolved function)";
const char* const CodeEntry::kRootEntryName = "(root)";

// static
CodeEntry* CodeEntry::program_entry() {
  static base::LeakyObject<CodeEntry> kProgramEntry(
      CodeEventListener::FUNCTION_TAG, CodeEntry::kProgramEntryName,
      CodeEntry::kEmptyResourceName, v8::CpuProfileNode::kNoLineNumberInfo,
      v8::CpuProfileNode::kNoColumnNumberInfo, nullptr, false,
      CodeEntry::CodeType::OTHER);
  return kProgramEntry.get();
}

// static
CodeEntry* CodeEntry::idle_entry() {
  static base::LeakyObject<CodeEntry> kIdleEntry(
      CodeEventListener::FUNCTION_TAG, CodeEntry::kIdleEntryName,
      CodeEntry::kEmptyResourceName, v8::CpuProfileNode::kNoLineNumberInfo,
      v8::CpuProfileNode::kNoColumnNumberInfo, nullptr, false,
      CodeEntry::CodeType::OTHER);
  return kIdleEntry.get();
}

// static
CodeEntry* CodeEntry::gc_entry() {
  static base::LeakyObject<CodeEntry> kGcEntry(
      CodeEventListener::BUILTIN_TAG, CodeEntry::kGarbageCollectorEntryName,
      CodeEntry::kEmptyResourceName, v8::CpuProfileNode::kNoLineNumberInfo,
      v8::CpuProfileNode::kNoColumnNumberInfo, nullptr, false,
      CodeEntry::CodeType::OTHER);
  return kGcEntry.get();
}

// static
CodeEntry* CodeEntry::unresolved_entry() {
  static base::LeakyObject<CodeEntry> kUnresolvedEntry(
      CodeEventListener::FUNCTION_TAG, CodeEntry::kUnresolvedFunctionName,
      CodeEntry::kEmptyResourceName, v8::CpuProfileNode::kNoLineNumberInfo,
      v8::CpuProfileNode::kNoColumnNumberInfo, nullptr, false,
      CodeEntry::CodeType::OTHER);
  return kUnresolvedEntry.get();
}

// static
CodeEntry* CodeEntry::root_entry() {
  static base::LeakyObject<CodeEntry> kRootEntry(
      CodeEventListener::FUNCTION_TAG, CodeEntry::kRootEntryName,
      CodeEntry::kEmptyResourceName, v8::CpuProfileNode::kNoLineNumberInfo,
      v8::CpuProfileNode::kNoColumnNumberInfo, nullptr, false,
      CodeEntry::CodeType::OTHER);
  return kRootEntry.get();
}

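// Two entries hash (and compare, see IsSameFunctionAs below) as the same
// function when they share a script id and start position. Entries without a
// script fall back to the identity of the name/resource-name string pointers
// (interned in StringsStorage, so pointer identity acts as string equality)
// plus the line number.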
uint32_t CodeEntry::GetHash() const {
  uint32_t hash = 0;
  if (script_id_ != v8::UnboundScript::kNoScriptId) {
    hash ^= ComputeUnseededHash(static_cast<uint32_t>(script_id_));
    hash ^= ComputeUnseededHash(static_cast<uint32_t>(position_));
  } else {
    hash ^= ComputeUnseededHash(
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_)));
    hash ^= ComputeUnseededHash(
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(resource_name_)));
    hash ^= ComputeUnseededHash(line_number_);
  }
  return hash;
}

bool CodeEntry::IsSameFunctionAs(const CodeEntry* entry) const {
  if (this == entry) return true;
  if (script_id_ != v8::UnboundScript::kNoScriptId) {
    return script_id_ == entry->script_id_ && position_ == entry->position_;
  }
  return name_ == entry->name_ && resource_name_ == entry->resource_name_ &&
         line_number_ == entry->line_number_;
}

void CodeEntry::SetBuiltinId(Builtin id) {
  bit_field_ = TagField::update(bit_field_, CodeEventListener::BUILTIN_TAG);
  bit_field_ = BuiltinField::update(bit_field_, id);
}

int CodeEntry::GetSourceLine(int pc_offset) const {
  if (line_info_) return line_info_->GetSourceLineNumber(pc_offset);
  return v8::CpuProfileNode::kNoLineNumberInfo;
}

void CodeEntry::SetInlineStacks(
    std::unordered_set<CodeEntry*, Hasher, Equals> inline_entries,
    std::unordered_map<int, std::vector<CodeEntryAndLineNumber>>
        inline_stacks) {
  EnsureRareData()->inline_entries_ = std::move(inline_entries);
  rare_data_->inline_stacks_ = std::move(inline_stacks);
}

const std::vector<CodeEntryAndLineNumber>* CodeEntry::GetInlineStack(
    int pc_offset) const {
  if (!line_info_) return nullptr;

  int inlining_id = line_info_->GetInliningId(pc_offset);
  if (inlining_id == SourcePosition::kNotInlined) return nullptr;
  DCHECK(rare_data_);

  auto it = rare_data_->inline_stacks_.find(inlining_id);
  return it != rare_data_->inline_stacks_.end() ? &it->second : nullptr;
}

void CodeEntry::set_deopt_info(
    const char* deopt_reason, int deopt_id,
    std::vector<CpuProfileDeoptFrame> inlined_frames) {
  RareData* rare_data = EnsureRareData();
  rare_data->deopt_reason_ = deopt_reason;
  rare_data->deopt_id_ = deopt_id;
  rare_data->deopt_inlined_frames_ = std::move(inlined_frames);
}

void CodeEntry::FillFunctionInfo(SharedFunctionInfo shared) {
  if (!shared.script().IsScript()) return;
  Script script = Script::cast(shared.script());
  set_script_id(script.id());
  set_position(shared.StartPosition());
  if (shared.optimization_disabled()) {
    set_bailout_reason(GetBailoutReason(shared.disabled_optimization_reason()));
  }
}

size_t CodeEntry::EstimatedSize() const {
  size_t estimated_size = 0;
  if (rare_data_) {
    // Account for the RareData struct itself, not just the pointer to it.
    estimated_size += sizeof(*rare_data_);

    for (const auto& inline_entry : rare_data_->inline_entries_) {
      estimated_size += inline_entry->EstimatedSize();
    }
    estimated_size += rare_data_->inline_entries_.size() *
                      sizeof(decltype(rare_data_->inline_entries_)::value_type);

    for (const auto& inline_stack_pair : rare_data_->inline_stacks_) {
      estimated_size += inline_stack_pair.second.size() *
                        sizeof(decltype(inline_stack_pair.second)::value_type);
    }
    estimated_size +=
        rare_data_->inline_stacks_.size() *
        (sizeof(decltype(rare_data_->inline_stacks_)::key_type) +
         sizeof(decltype(rare_data_->inline_stacks_)::value_type));

    estimated_size +=
        rare_data_->deopt_inlined_frames_.capacity() *
        sizeof(decltype(rare_data_->deopt_inlined_frames_)::value_type);
  }

  if (line_info_) {
    estimated_size += line_info_->Size();
  }
  return sizeof(*this) + estimated_size;
}

CpuProfileDeoptInfo CodeEntry::GetDeoptInfo() {
  DCHECK(has_deopt_info());

  CpuProfileDeoptInfo info;
  info.deopt_reason = rare_data_->deopt_reason_;
  DCHECK_NE(kNoDeoptimizationId, rare_data_->deopt_id_);
  if (rare_data_->deopt_inlined_frames_.empty()) {
    info.stack.push_back(CpuProfileDeoptFrame(
        {script_id_, static_cast<size_t>(std::max(0, position()))}));
  } else {
    info.stack = rare_data_->deopt_inlined_frames_;
  }
  return info;
}

CodeEntry::RareData* CodeEntry::EnsureRareData() {
  if (!rare_data_) {
    rare_data_.reset(new RareData());
  }
  return rare_data_.get();
}

void CodeEntry::ReleaseStrings(StringsStorage& strings) {
  DCHECK_EQ(ref_count_, 0UL);

  if (name_) {
    strings.Release(name_);
    name_ = nullptr;
  }
  if (resource_name_) {
    strings.Release(resource_name_);
    resource_name_ = nullptr;
  }
}

void CodeEntry::print() const {
  base::OS::Print("CodeEntry: at %p\n", this);

  base::OS::Print(" - name: %s\n", name_);
  base::OS::Print(" - resource_name: %s\n", resource_name_);
  base::OS::Print(" - line_number: %d\n", line_number_);
  base::OS::Print(" - column_number: %d\n", column_number_);
  base::OS::Print(" - script_id: %d\n", script_id_);
  base::OS::Print(" - position: %d\n", position_);

  if (line_info_) {
    line_info_->print();
  }

  if (rare_data_) {
    base::OS::Print(" - deopt_reason: %s\n", rare_data_->deopt_reason_);
    base::OS::Print(" - bailout_reason: %s\n", rare_data_->bailout_reason_);
    base::OS::Print(" - deopt_id: %d\n", rare_data_->deopt_id_);

    if (!rare_data_->inline_stacks_.empty()) {
      base::OS::Print(" - inline stacks:\n");
      for (auto it = rare_data_->inline_stacks_.begin();
           it != rare_data_->inline_stacks_.end(); it++) {
        base::OS::Print(" inlining_id: [%d]\n", it->first);
        for (const auto& e : it->second) {
          base::OS::Print(" %s --> %d\n", e.code_entry->name(),
                          e.line_number);
        }
      }
    } else {
      base::OS::Print(" - inline stacks: (empty)\n");
    }

    if (!rare_data_->deopt_inlined_frames_.empty()) {
      base::OS::Print(" - deopt inlined frames:\n");
      for (const CpuProfileDeoptFrame& frame :
           rare_data_->deopt_inlined_frames_) {
        base::OS::Print("script_id: %d position: %zu\n", frame.script_id,
                        frame.position);
      }
    } else {
      base::OS::Print(" - deopt inlined frames: (empty)\n");
    }
  }
  base::OS::Print("\n");
}

ProfileNode::~ProfileNode() {
  if (tree_->code_entries()) tree_->code_entries()->DecRef(entry_);
}

CpuProfileNode::SourceType ProfileNode::source_type() const {
  // Handle metadata and VM state code entry types.
  if (entry_ == CodeEntry::program_entry() ||
      entry_ == CodeEntry::idle_entry() || entry_ == CodeEntry::gc_entry() ||
      entry_ == CodeEntry::root_entry()) {
    return CpuProfileNode::kInternal;
  }
  if (entry_ == CodeEntry::unresolved_entry())
    return CpuProfileNode::kUnresolved;

  // Otherwise, resolve based on logger tag.
  switch (entry_->tag()) {
    case CodeEventListener::EVAL_TAG:
    case CodeEventListener::SCRIPT_TAG:
    case CodeEventListener::LAZY_COMPILE_TAG:
    case CodeEventListener::FUNCTION_TAG:
      return CpuProfileNode::kScript;
    case CodeEventListener::BUILTIN_TAG:
    case CodeEventListener::HANDLER_TAG:
    case CodeEventListener::BYTECODE_HANDLER_TAG:
    case CodeEventListener::NATIVE_FUNCTION_TAG:
    case CodeEventListener::NATIVE_SCRIPT_TAG:
    case CodeEventListener::NATIVE_LAZY_COMPILE_TAG:
      return CpuProfileNode::kBuiltin;
    case CodeEventListener::CALLBACK_TAG:
      return CpuProfileNode::kCallback;
    case CodeEventListener::REG_EXP_TAG:
    case CodeEventListener::STUB_TAG:
    case CodeEventListener::CODE_CREATION_EVENT:
    case CodeEventListener::CODE_DISABLE_OPT_EVENT:
    case CodeEventListener::CODE_MOVE_EVENT:
    case CodeEventListener::CODE_DELETE_EVENT:
    case CodeEventListener::CODE_MOVING_GC:
    case CodeEventListener::SHARED_FUNC_MOVE_EVENT:
    case CodeEventListener::SNAPSHOT_CODE_NAME_EVENT:
    case CodeEventListener::TICK_EVENT:
    case CodeEventListener::NUMBER_OF_LOG_EVENTS:
      return CpuProfileNode::kInternal;
  }
}

void ProfileNode::CollectDeoptInfo(CodeEntry* entry) {
  deopt_infos_.push_back(entry->GetDeoptInfo());
  entry->clear_deopt_info();
}

ProfileNode* ProfileNode::FindChild(CodeEntry* entry, int line_number) {
  auto map_entry = children_.find({entry, line_number});
  return map_entry != children_.end() ? map_entry->second : nullptr;
}

ProfileNode* ProfileNode::FindOrAddChild(CodeEntry* entry, int line_number) {
  auto map_entry = children_.find({entry, line_number});
  if (map_entry == children_.end()) {
    ProfileNode* node = new ProfileNode(tree_, entry, this, line_number);
    children_[{entry, line_number}] = node;
    children_list_.push_back(node);
    return node;
  } else {
    return map_entry->second;
  }
}

void ProfileNode::IncrementLineTicks(int src_line) {
  if (src_line == v8::CpuProfileNode::kNoLineNumberInfo) return;
  // Increment a hit counter of a certain source line.
  // Add a new source line if not found.
  auto map_entry = line_ticks_.find(src_line);
  if (map_entry == line_ticks_.end()) {
    line_ticks_[src_line] = 1;
  } else {
    line_ticks_[src_line]++;
  }
}

bool ProfileNode::GetLineTicks(v8::CpuProfileNode::LineTick* entries,
                               unsigned int length) const {
  if (entries == nullptr || length == 0) return false;

  unsigned line_count = static_cast<unsigned>(line_ticks_.size());

  if (line_count == 0) return true;
  if (length < line_count) return false;

  v8::CpuProfileNode::LineTick* entry = entries;

  for (auto p = line_ticks_.begin(); p != line_ticks_.end(); p++, entry++) {
    entry->line = p->first;
    entry->hit_count = p->second;
  }

  return true;
}

void ProfileNode::Print(int indent) const {
  int line_number = line_number_ != 0 ? line_number_ : entry_->line_number();
  base::OS::Print("%5u %*s %s:%d %d %d #%d", self_ticks_, indent, "",
                  entry_->name(), line_number, source_type(),
                  entry_->script_id(), id());
  if (entry_->resource_name()[0] != '\0')
    base::OS::Print(" %s:%d", entry_->resource_name(), entry_->line_number());
  base::OS::Print("\n");
  for (const CpuProfileDeoptInfo& info : deopt_infos_) {
    base::OS::Print(
        "%*s;;; deopted at script_id: %d position: %zu with reason '%s'.\n",
        indent + 10, "", info.stack[0].script_id, info.stack[0].position,
        info.deopt_reason);
    for (size_t index = 1; index < info.stack.size(); ++index) {
      base::OS::Print("%*s;;; Inline point: script_id %d position: %zu.\n",
                      indent + 10, "", info.stack[index].script_id,
                      info.stack[index].position);
    }
  }
  const char* bailout_reason = entry_->bailout_reason();
  if (bailout_reason != GetBailoutReason(BailoutReason::kNoReason) &&
      bailout_reason != CodeEntry::kEmptyBailoutReason) {
    base::OS::Print("%*s bailed out due to '%s'\n", indent + 10, "",
                    bailout_reason);
  }
  for (auto child : children_) {
    child.second->Print(indent + 2);
  }
}

class DeleteNodesCallback {
 public:
  void BeforeTraversingChild(ProfileNode*, ProfileNode*) {}

  void AfterAllChildrenTraversed(ProfileNode* node) { delete node; }

  void AfterChildTraversed(ProfileNode*, ProfileNode*) {}
};

ProfileTree::ProfileTree(Isolate* isolate, CodeEntryStorage* storage)
    : next_node_id_(1),
      isolate_(isolate),
      code_entries_(storage),
      root_(new ProfileNode(this, CodeEntry::root_entry(), nullptr)) {}

ProfileTree::~ProfileTree() {
  DeleteNodesCallback cb;
  TraverseDepthFirst(&cb);
}

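// Builds (or extends) the call tree from a sampled stack: the path is given
// top-frame-first, so it is walked in reverse starting from the root, adding
// one child per frame. Returns the node for the innermost (top) frame.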
ProfileNode* ProfileTree::AddPathFromEnd(const std::vector<CodeEntry*>& path,
                                         int src_line, bool update_stats) {
  ProfileNode* node = root_;
  CodeEntry* last_entry = nullptr;
  for (auto it = path.rbegin(); it != path.rend(); ++it) {
    if (*it == nullptr) continue;
    last_entry = *it;
    node = node->FindOrAddChild(*it, v8::CpuProfileNode::kNoLineNumberInfo);
  }
  if (last_entry && last_entry->has_deopt_info()) {
    node->CollectDeoptInfo(last_entry);
  }
  if (update_stats) {
    node->IncrementSelfTicks();
    if (src_line != v8::CpuProfileNode::kNoLineNumberInfo) {
      node->IncrementLineTicks(src_line);
    }
  }
  return node;
}

ProfileNode* ProfileTree::AddPathFromEnd(const ProfileStackTrace& path,
                                         int src_line, bool update_stats,
                                         ProfilingMode mode) {
  ProfileNode* node = root_;
  CodeEntry* last_entry = nullptr;
  int parent_line_number = v8::CpuProfileNode::kNoLineNumberInfo;
  for (auto it = path.rbegin(); it != path.rend(); ++it) {
    if (it->code_entry == nullptr) continue;
    last_entry = it->code_entry;
    node = node->FindOrAddChild(it->code_entry, parent_line_number);
    parent_line_number = mode == ProfilingMode::kCallerLineNumbers
                             ? it->line_number
                             : v8::CpuProfileNode::kNoLineNumberInfo;
  }
  if (last_entry && last_entry->has_deopt_info()) {
    node->CollectDeoptInfo(last_entry);
  }
  if (update_stats) {
    node->IncrementSelfTicks();
    if (src_line != v8::CpuProfileNode::kNoLineNumberInfo) {
      node->IncrementLineTicks(src_line);
    }
  }
  return node;
}

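// A cursor into a node's child list, used as a stack frame by the iterative
// depth-first traversal below.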
class Position {
 public:
  explicit Position(ProfileNode* node) : node(node), child_idx_(0) {}
  V8_INLINE ProfileNode* current_child() {
    return node->children()->at(child_idx_);
  }
  V8_INLINE bool has_current_child() {
    return child_idx_ < static_cast<int>(node->children()->size());
  }
  V8_INLINE void next_child() { ++child_idx_; }

  ProfileNode* node;

 private:
  int child_idx_;
};

// Non-recursive implementation of a depth-first post-order tree traversal.
template <typename Callback>
void ProfileTree::TraverseDepthFirst(Callback* callback) {
  std::vector<Position> stack;
  stack.emplace_back(root_);
  while (!stack.empty()) {
    Position& current = stack.back();
    if (current.has_current_child()) {
      callback->BeforeTraversingChild(current.node, current.current_child());
      stack.emplace_back(current.current_child());
    } else {
      callback->AfterAllChildrenTraversed(current.node);
      if (stack.size() > 1) {
        Position& parent = stack[stack.size() - 2];
        callback->AfterChildTraversed(parent.node, current.node);
        parent.next_child();
      }
      // Remove child from the stack.
      stack.pop_back();
    }
  }
}

void ContextFilter::OnMoveEvent(Address from_address, Address to_address) {
  if (native_context_address() != from_address) return;

  set_native_context_address(to_address);
}

using v8::tracing::TracedValue;

std::atomic<ProfilerId> CpuProfilesCollection::last_id_{0};

CpuProfile::CpuProfile(CpuProfiler* profiler, ProfilerId id, const char* title,
                       CpuProfilingOptions options,
                       std::unique_ptr<DiscardedSamplesDelegate> delegate)
    : title_(title),
      options_(options),
      delegate_(std::move(delegate)),
      start_time_(base::TimeTicks::Now()),
      top_down_(profiler->isolate(), profiler->code_entries()),
      profiler_(profiler),
      streaming_next_sample_(0),
      id_(id) {
  // The startTime timestamp is not converted to Perfetto's clock domain and
  // will get out of sync with other timestamps Perfetto knows about, including
  // the automatic trace event "ts" timestamp. startTime is included for
  // backward compatibility with the tracing protocol but the value of "ts"
  // should be used instead (it is recorded nearly immediately after).
  auto value = TracedValue::Create();
  value->SetDouble("startTime", start_time_.since_origin().InMicroseconds());
  TRACE_EVENT_SAMPLE_WITH_ID1(TRACE_DISABLED_BY_DEFAULT("v8.cpu_profiler"),
                              "Profile", id_, "data", std::move(value));

  DisallowHeapAllocation no_gc;
  if (delegate_) {
    delegate_->SetId(id_);
  }
  if (options_.has_filter_context()) {
    i::Address raw_filter_context =
        reinterpret_cast<i::Address>(options_.raw_filter_context());
    context_filter_.set_native_context_address(raw_filter_context);
  }
}

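// Decides whether the current sampler tick should be recorded into this
// profile, downsampling when the profile requested a coarser interval than
// the sampling source provides. Illustrative example: with a source interval
// of 100us and a profile interval of 300us, roughly every third call returns
// true.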
bool CpuProfile::CheckSubsample(base::TimeDelta source_sampling_interval) {
  DCHECK_GE(source_sampling_interval, base::TimeDelta());

  // If the sampling source's sampling interval is 0, record as many samples
  // as possible, irrespective of the profile's sampling interval. Manually
  // taken samples (via CollectSample) fall into this case as well.
  if (source_sampling_interval.IsZero()) return true;

  next_sample_delta_ -= source_sampling_interval;
  if (next_sample_delta_ <= base::TimeDelta()) {
    next_sample_delta_ =
        base::TimeDelta::FromMicroseconds(options_.sampling_interval_us());
    return true;
  }
  return false;
}

void CpuProfile::AddPath(base::TimeTicks timestamp,
                         const ProfileStackTrace& path, int src_line,
                         bool update_stats, base::TimeDelta sampling_interval,
                         StateTag state_tag,
                         EmbedderStateTag embedder_state_tag) {
  if (!CheckSubsample(sampling_interval)) return;

  ProfileNode* top_frame_node =
      top_down_.AddPathFromEnd(path, src_line, update_stats, options_.mode());

  bool should_record_sample =
      !timestamp.IsNull() && timestamp >= start_time_ &&
      (options_.max_samples() == CpuProfilingOptions::kNoSampleLimit ||
       samples_.size() < options_.max_samples());

  if (should_record_sample) {
    samples_.push_back(
        {top_frame_node, timestamp, src_line, state_tag, embedder_state_tag});
  }

  if (!should_record_sample && delegate_ != nullptr) {
    const auto task_runner = V8::GetCurrentPlatform()->GetForegroundTaskRunner(
        reinterpret_cast<v8::Isolate*>(profiler_->isolate()));

    task_runner->PostTask(std::make_unique<CpuProfileMaxSamplesCallbackTask>(
        std::move(delegate_)));
    // std::move ensures that the delegate_ will be null on the next sample,
    // so we don't post a task multiple times.
  }

  const int kSamplesFlushCount = 100;
  const int kNodesFlushCount = 10;
  if (samples_.size() - streaming_next_sample_ >= kSamplesFlushCount ||
      top_down_.pending_nodes_count() >= kNodesFlushCount) {
    StreamPendingTraceEvents();
  }
}

namespace {

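// Serializes a ProfileNode as a trace-event "callFrame" dictionary. Line and
// column numbers are emitted 0-based, matching the DevTools protocol, while
// CodeEntry stores them 1-based.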
void BuildNodeValue(const ProfileNode* node, TracedValue* value) {
  const CodeEntry* entry = node->entry();
  value->BeginDictionary("callFrame");
  value->SetString("functionName", entry->name());
  if (*entry->resource_name()) {
    value->SetString("url", entry->resource_name());
  }
  value->SetInteger("scriptId", entry->script_id());
  if (entry->line_number()) {
    value->SetInteger("lineNumber", entry->line_number() - 1);
  }
  if (entry->column_number()) {
    value->SetInteger("columnNumber", entry->column_number() - 1);
  }
  value->SetString("codeType", entry->code_type_string());
  value->EndDictionary();
  value->SetInteger("id", node->id());
  if (node->parent()) {
    value->SetInteger("parent", node->parent()->id());
  }
  const char* deopt_reason = entry->bailout_reason();
  if (deopt_reason && deopt_reason[0] && strcmp(deopt_reason, "no reason")) {
    value->SetString("deoptReason", deopt_reason);
  }
}

}  // namespace

void CpuProfile::StreamPendingTraceEvents() {
  std::vector<const ProfileNode*> pending_nodes = top_down_.TakePendingNodes();
  if (pending_nodes.empty() && samples_.empty()) return;
  auto value = TracedValue::Create();

  if (!pending_nodes.empty() || streaming_next_sample_ != samples_.size()) {
    value->BeginDictionary("cpuProfile");
    if (!pending_nodes.empty()) {
      value->BeginArray("nodes");
      for (auto node : pending_nodes) {
        value->BeginDictionary();
        BuildNodeValue(node, value.get());
        value->EndDictionary();
      }
      value->EndArray();
    }
    if (streaming_next_sample_ != samples_.size()) {
      value->BeginArray("samples");
      for (size_t i = streaming_next_sample_; i < samples_.size(); ++i) {
        value->AppendInteger(samples_[i].node->id());
      }
      value->EndArray();
    }
    value->EndDictionary();
  }
  if (streaming_next_sample_ != samples_.size()) {
    // timeDeltas are computed within CLOCK_MONOTONIC. However, trace event
    // "ts" timestamps are converted to CLOCK_BOOTTIME by Perfetto. To get
    // absolute timestamps in CLOCK_BOOTTIME from timeDeltas, add them to
    // the "ts" timestamp from the initial "Profile" trace event sent by
    // CpuProfile::CpuProfile().
    //
    // Note that if the system is suspended and resumed while samples_ is
    // captured, timeDeltas derived after resume will not be convertible to
    // correct CLOCK_BOOTTIME time values (for instance, producing
    // CLOCK_BOOTTIME time values in the middle of the suspended period).
    value->BeginArray("timeDeltas");
    base::TimeTicks lastTimestamp =
        streaming_next_sample_ ? samples_[streaming_next_sample_ - 1].timestamp
                               : start_time();
    for (size_t i = streaming_next_sample_; i < samples_.size(); ++i) {
      value->AppendInteger(static_cast<int>(
          (samples_[i].timestamp - lastTimestamp).InMicroseconds()));
      lastTimestamp = samples_[i].timestamp;
    }
    value->EndArray();
    bool has_non_zero_lines =
        std::any_of(samples_.begin() + streaming_next_sample_, samples_.end(),
                    [](const SampleInfo& sample) { return sample.line != 0; });
    if (has_non_zero_lines) {
      value->BeginArray("lines");
      for (size_t i = streaming_next_sample_; i < samples_.size(); ++i) {
        value->AppendInteger(samples_[i].line);
      }
      value->EndArray();
    }
    streaming_next_sample_ = samples_.size();
  }

  TRACE_EVENT_SAMPLE_WITH_ID1(TRACE_DISABLED_BY_DEFAULT("v8.cpu_profiler"),
                              "ProfileChunk", id_, "data", std::move(value));
}

void CpuProfile::FinishProfile() {
  end_time_ = base::TimeTicks::Now();
  // Stop tracking context movements after profiling stops.
  context_filter_.set_native_context_address(kNullAddress);
  StreamPendingTraceEvents();
  auto value = TracedValue::Create();
  // The endTime timestamp is not converted to Perfetto's clock domain and will
  // get out of sync with other timestamps Perfetto knows about, including the
  // automatic trace event "ts" timestamp. endTime is included for backward
  // compatibility with the tracing protocol: its presence in "data" is used by
  // devtools to identify the last ProfileChunk but the value of "ts" should be
  // used instead (it is recorded nearly immediately after).
  value->SetDouble("endTime", end_time_.since_origin().InMicroseconds());
  TRACE_EVENT_SAMPLE_WITH_ID1(TRACE_DISABLED_BY_DEFAULT("v8.cpu_profiler"),
                              "ProfileChunk", id_, "data", std::move(value));
}

namespace {

void FlattenNodesTree(const v8::CpuProfileNode* node,
                      std::vector<const v8::CpuProfileNode*>* nodes) {
  nodes->emplace_back(node);
  const int childrenCount = node->GetChildrenCount();
  for (int i = 0; i < childrenCount; i++)
    FlattenNodesTree(node->GetChild(i), nodes);
}

}  // namespace

void CpuProfileJSONSerializer::Serialize(v8::OutputStream* stream) {
  DCHECK_NULL(writer_);
  writer_ = new OutputStreamWriter(stream);
  SerializeImpl();
  delete writer_;
  writer_ = nullptr;
}

void CpuProfileJSONSerializer::SerializePositionTicks(
    const v8::CpuProfileNode* node, int lineCount) {
  std::vector<v8::CpuProfileNode::LineTick> entries(lineCount);
  if (node->GetLineTicks(&entries[0], lineCount)) {
    for (int i = 0; i < lineCount; i++) {
      writer_->AddCharacter('{');
      writer_->AddString("\"line\":");
      writer_->AddNumber(entries[i].line);
      writer_->AddString(",\"ticks\":");
      writer_->AddNumber(entries[i].hit_count);
      writer_->AddCharacter('}');
      if (i != (lineCount - 1)) writer_->AddCharacter(',');
    }
  }
}

void CpuProfileJSONSerializer::SerializeCallFrame(
    const v8::CpuProfileNode* node) {
  writer_->AddString("\"functionName\":\"");
  writer_->AddString(node->GetFunctionNameStr());
  writer_->AddString("\",\"lineNumber\":");
  writer_->AddNumber(node->GetLineNumber() - 1);
  writer_->AddString(",\"columnNumber\":");
  writer_->AddNumber(node->GetColumnNumber() - 1);
  writer_->AddString(",\"scriptId\":");
  writer_->AddNumber(node->GetScriptId());
  writer_->AddString(",\"url\":\"");
  writer_->AddString(node->GetScriptResourceNameStr());
  writer_->AddCharacter('"');
}

void CpuProfileJSONSerializer::SerializeChildren(const v8::CpuProfileNode* node,
                                                 int childrenCount) {
  for (int i = 0; i < childrenCount; i++) {
    writer_->AddNumber(node->GetChild(i)->GetNodeId());
    if (i != (childrenCount - 1)) writer_->AddCharacter(',');
  }
}

void CpuProfileJSONSerializer::SerializeNode(const v8::CpuProfileNode* node) {
  writer_->AddCharacter('{');
  writer_->AddString("\"id\":");
  writer_->AddNumber(node->GetNodeId());

  writer_->AddString(",\"hitCount\":");
  writer_->AddNumber(node->GetHitCount());

  writer_->AddString(",\"callFrame\":{");
  SerializeCallFrame(node);
  writer_->AddCharacter('}');

  const int childrenCount = node->GetChildrenCount();
  if (childrenCount) {
    writer_->AddString(",\"children\":[");
    SerializeChildren(node, childrenCount);
    writer_->AddCharacter(']');
  }

  const char* deoptReason = node->GetBailoutReason();
  if (deoptReason && deoptReason[0] && strcmp(deoptReason, "no reason")) {
    writer_->AddString(",\"deoptReason\":\"");
    writer_->AddString(deoptReason);
    writer_->AddCharacter('"');
  }

  unsigned lineCount = node->GetHitLineCount();
  if (lineCount) {
    writer_->AddString(",\"positionTicks\":[");
    SerializePositionTicks(node, lineCount);
    writer_->AddCharacter(']');
  }
  writer_->AddCharacter('}');
}

void CpuProfileJSONSerializer::SerializeNodes() {
  std::vector<const v8::CpuProfileNode*> nodes;
  FlattenNodesTree(
      reinterpret_cast<const v8::CpuProfileNode*>(profile_->top_down()->root()),
      &nodes);

  for (size_t i = 0; i < nodes.size(); i++) {
    SerializeNode(nodes.at(i));
    if (writer_->aborted()) return;
    if (i != (nodes.size() - 1)) writer_->AddCharacter(',');
  }
}

void CpuProfileJSONSerializer::SerializeTimeDeltas() {
  int count = profile_->samples_count();
  uint64_t lastTime = profile_->start_time().since_origin().InMicroseconds();
  for (int i = 0; i < count; i++) {
    uint64_t ts = profile_->sample(i).timestamp.since_origin().InMicroseconds();
    writer_->AddNumber(static_cast<int>(ts - lastTime));
    if (i != (count - 1)) writer_->AddString(",");
    lastTime = ts;
  }
}

void CpuProfileJSONSerializer::SerializeSamples() {
  int count = profile_->samples_count();
  for (int i = 0; i < count; i++) {
    writer_->AddNumber(profile_->sample(i).node->id());
    if (i != (count - 1)) writer_->AddString(",");
  }
}

void CpuProfileJSONSerializer::SerializeImpl() {
  writer_->AddCharacter('{');
  writer_->AddString("\"nodes\":[");
  SerializeNodes();
  writer_->AddString("]");

  writer_->AddString(",\"startTime\":");
  writer_->AddNumber(static_cast<unsigned>(
      profile_->start_time().since_origin().InMicroseconds()));

  writer_->AddString(",\"endTime\":");
  writer_->AddNumber(static_cast<unsigned>(
      profile_->end_time().since_origin().InMicroseconds()));

  writer_->AddString(",\"samples\":[");
  SerializeSamples();
  if (writer_->aborted()) return;
  writer_->AddCharacter(']');

  writer_->AddString(",\"timeDeltas\":[");
  SerializeTimeDeltas();
  if (writer_->aborted()) return;
  writer_->AddString("]");

  writer_->AddCharacter('}');
  writer_->Finalize();
}

void CpuProfile::Print() const {
  base::OS::Print("[Top down]:\n");
  top_down_.Print();
  ProfilerStats::Instance()->Print();
  ProfilerStats::Instance()->Clear();
}

void CodeEntryStorage::AddRef(CodeEntry* entry) {
  if (entry->is_ref_counted()) entry->AddRef();
}

void CodeEntryStorage::DecRef(CodeEntry* entry) {
  if (entry->is_ref_counted() && entry->DecRef() == 0) {
    if (entry->rare_data_) {
      for (auto* inline_entry : entry->rare_data_->inline_entries_) {
        DecRef(inline_entry);
      }
    }
    entry->ReleaseStrings(function_and_resource_names_);
    delete entry;
  }
}

CodeMap::CodeMap(CodeEntryStorage& storage) : code_entries_(storage) {}

CodeMap::~CodeMap() { Clear(); }

void CodeMap::Clear() {
  for (auto& slot : code_map_) {
    if (CodeEntry* entry = slot.second.entry) {
      code_entries_.DecRef(entry);
    } else {
      // We expect all entries in the code mapping to contain a CodeEntry.
      UNREACHABLE();
    }
  }

  code_map_.clear();
}

void CodeMap::AddCode(Address addr, CodeEntry* entry, unsigned size) {
  code_map_.emplace(addr, CodeEntryMapInfo{entry, size});
  entry->set_instruction_start(addr);
}

bool CodeMap::RemoveCode(CodeEntry* entry) {
  auto range = code_map_.equal_range(entry->instruction_start());
  for (auto i = range.first; i != range.second; ++i) {
    if (i->second.entry == entry) {
      code_entries_.DecRef(entry);
      code_map_.erase(i);
      return true;
    }
  }
  return false;
}

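// Removes all entries overlapping [start, end). The map is keyed by start
// address, so the predecessor of |start| is also checked in case its code
// range extends into the cleared region; removed entries are DecRef'ed.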
void CodeMap::ClearCodesInRange(Address start, Address end) {
  auto left = code_map_.upper_bound(start);
  if (left != code_map_.begin()) {
    --left;
    if (left->first + left->second.size <= start) ++left;
  }
  auto right = left;
  for (; right != code_map_.end() && right->first < end; ++right) {
    code_entries_.DecRef(right->second.entry);
  }
  code_map_.erase(left, right);
}

CodeEntry* CodeMap::FindEntry(Address addr, Address* out_instruction_start) {
  // Note that an address may correspond to multiple CodeEntry objects. An
  // arbitrary selection is made (as per multimap spec) in the event of a
  // collision.
  auto it = code_map_.upper_bound(addr);
  if (it == code_map_.begin()) return nullptr;
  --it;
  Address start_address = it->first;
  Address end_address = start_address + it->second.size;
  CodeEntry* ret = addr < end_address ? it->second.entry : nullptr;
  DCHECK(!ret || (addr >= start_address && addr < end_address));
  if (ret && out_instruction_start) *out_instruction_start = start_address;
  return ret;
}

void CodeMap::MoveCode(Address from, Address to) {
  if (from == to) return;

  auto range = code_map_.equal_range(from);
  // Instead of iterating until |range.second|, iterate the number of elements.
  // This is because the |range.second| may no longer be the element past the
  // end of the equal elements range after insertions.
  size_t distance = std::distance(range.first, range.second);
  auto it = range.first;
  while (distance--) {
    CodeEntryMapInfo& info = it->second;
    DCHECK(info.entry);
    DCHECK_EQ(info.entry->instruction_start(), from);
    info.entry->set_instruction_start(to);

    DCHECK(from + info.size <= to || to + info.size <= from);
    code_map_.emplace(to, info);
    it++;
  }

  code_map_.erase(range.first, it);
}

void CodeMap::Print() {
  for (const auto& pair : code_map_) {
    base::OS::Print("%p %5d %s\n", reinterpret_cast<void*>(pair.first),
                    pair.second.size, pair.second.entry->name());
  }
}

size_t CodeMap::GetEstimatedMemoryUsage() const {
  size_t map_size = 0;
  for (const auto& pair : code_map_) {
    map_size += sizeof(pair.first) + sizeof(pair.second) +
                pair.second.entry->EstimatedSize();
  }
  return sizeof(*this) + map_size;
}

CpuProfilesCollection::CpuProfilesCollection(Isolate* isolate)
    : profiler_(nullptr), current_profiles_semaphore_(1), isolate_(isolate) {
  USE(isolate_);
}

CpuProfilingResult CpuProfilesCollection::StartProfilingForTesting(
    ProfilerId id) {
  return StartProfiling(id);
}

CpuProfilingResult CpuProfilesCollection::StartProfiling(
    const char* title, CpuProfilingOptions options,
    std::unique_ptr<DiscardedSamplesDelegate> delegate) {
  return StartProfiling(++last_id_, title, options, std::move(delegate));
}

CpuProfilingResult CpuProfilesCollection::StartProfiling(
    ProfilerId id, const char* title, CpuProfilingOptions options,
    std::unique_ptr<DiscardedSamplesDelegate> delegate) {
  current_profiles_semaphore_.Wait();

  if (static_cast<int>(current_profiles_.size()) >= kMaxSimultaneousProfiles) {
    current_profiles_semaphore_.Signal();
    return {
        0,
        CpuProfilingStatus::kErrorTooManyProfilers,
    };
  }

  for (const std::unique_ptr<CpuProfile>& profile : current_profiles_) {
    if ((profile->title() != nullptr && title != nullptr &&
         strcmp(profile->title(), title) == 0) ||
        profile->id() == id) {
      // Ignore attempts to start a profile with the same title or id...
      current_profiles_semaphore_.Signal();
      // ...though return kAlreadyStarted to force it to collect a sample.
      return {
          profile->id(),
          CpuProfilingStatus::kAlreadyStarted,
      };
    }
  }

  CpuProfile* profile =
      new CpuProfile(profiler_, id, title, options, std::move(delegate));
  current_profiles_.emplace_back(profile);
  current_profiles_semaphore_.Signal();

  return {
      profile->id(),
      CpuProfilingStatus::kStarted,
  };
}

CpuProfile* CpuProfilesCollection::StopProfiling(ProfilerId id) {
  current_profiles_semaphore_.Wait();
  CpuProfile* profile = nullptr;

  auto it = std::find_if(
      current_profiles_.rbegin(), current_profiles_.rend(),
      [=](const std::unique_ptr<CpuProfile>& p) { return id == p->id(); });

  if (it != current_profiles_.rend()) {
    (*it)->FinishProfile();
    profile = it->get();
    finished_profiles_.push_back(std::move(*it));
    // Convert reverse iterator to matching forward iterator.
    current_profiles_.erase(--(it.base()));
  }
  current_profiles_semaphore_.Signal();
  return profile;
}

CpuProfile* CpuProfilesCollection::Lookup(const char* title) {
  // Called from VM thread, and only it can mutate the list,
  // so no locking is needed here.
  DCHECK_EQ(ThreadId::Current(), isolate_->thread_id());
  if (title == nullptr) {
    return nullptr;
  }
  // http://crbug/51594: as an edge case, console.profile may provide an empty
  // title, which must not crash.
  const bool empty_title = title[0] == '\0';
  auto it = std::find_if(
      current_profiles_.rbegin(), current_profiles_.rend(),
      [&](const std::unique_ptr<CpuProfile>& p) {
        return (empty_title ||
                (p->title() != nullptr && strcmp(p->title(), title) == 0));
      });
  if (it != current_profiles_.rend()) {
    return it->get();
  }

  return nullptr;
}

bool CpuProfilesCollection::IsLastProfileLeft(ProfilerId id) {
  // Called from VM thread, and only it can mutate the list,
  // so no locking is needed here.
  DCHECK_EQ(ThreadId::Current(), isolate_->thread_id());
  if (current_profiles_.size() != 1) return false;
  return id == current_profiles_[0]->id();
}

void CpuProfilesCollection::RemoveProfile(CpuProfile* profile) {
  // Called from VM thread for a completed profile.
  DCHECK_EQ(ThreadId::Current(), isolate_->thread_id());
  auto pos =
      std::find_if(finished_profiles_.begin(), finished_profiles_.end(),
                   [&](const std::unique_ptr<CpuProfile>& finished_profile) {
                     return finished_profile.get() == profile;
                   });
  DCHECK(pos != finished_profiles_.end());
  finished_profiles_.erase(pos);
}

namespace {

int64_t GreatestCommonDivisor(int64_t a, int64_t b) {
  return b ? GreatestCommonDivisor(b, a % b) : a;
}

}  // namespace

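// Computes the greatest common divisor of all active profiles' sampling
// intervals, after snapping each to a multiple of the base interval, so that
// a single sampler tick rate can serve every profile. Illustrative example:
// with a 100us base interval and profiles requesting 500us and 750us
// (snapped up to 800us), the result is GCD(500, 800) = 100us.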
base::TimeDelta CpuProfilesCollection::GetCommonSamplingInterval() const {
  DCHECK(profiler_);

  int64_t base_sampling_interval_us =
      profiler_->sampling_interval().InMicroseconds();
  if (base_sampling_interval_us == 0) return base::TimeDelta();

  int64_t interval_us = 0;
  for (const auto& profile : current_profiles_) {
    // Snap the profile's requested sampling interval to the next multiple of
    // the base sampling interval.
    int64_t profile_interval_us =
        std::max<int64_t>(
            (profile->sampling_interval_us() + base_sampling_interval_us - 1) /
                base_sampling_interval_us,
            1) *
        base_sampling_interval_us;
    interval_us = GreatestCommonDivisor(interval_us, profile_interval_us);
  }
  return base::TimeDelta::FromMicroseconds(interval_us);
}

void CpuProfilesCollection::AddPathToCurrentProfiles(
    base::TimeTicks timestamp, const ProfileStackTrace& path, int src_line,
    bool update_stats, base::TimeDelta sampling_interval, StateTag state,
    EmbedderStateTag embedder_state_tag, Address native_context_address,
    Address embedder_native_context_address) {
  // As starting / stopping profiles is rare relative to this method, we don't
  // bother minimizing the duration of lock holding, e.g. copying contents of
  // the list to a local vector.
  current_profiles_semaphore_.Wait();
  const ProfileStackTrace empty_path;
  for (const std::unique_ptr<CpuProfile>& profile : current_profiles_) {
    ContextFilter& context_filter = profile->context_filter();
    // If the context filter check failed, omit the contents of the stack.
    bool accepts_context = context_filter.Accept(native_context_address);
    bool accepts_embedder_context =
        context_filter.Accept(embedder_native_context_address);

    // If a context filter is set, do not propagate the StateTag when the
    // context is not accepted. GC is an exception, because its native context
    // address is guaranteed to be empty.
    DCHECK(state != StateTag::GC || native_context_address == kNullAddress);
    if (!accepts_context && state != StateTag::GC) {
      state = StateTag::IDLE;
    }
    profile->AddPath(timestamp, accepts_context ? path : empty_path, src_line,
                     update_stats, sampling_interval, state,
                     accepts_embedder_context ? embedder_state_tag
                                              : EmbedderStateTag::EMPTY);
  }
  current_profiles_semaphore_.Signal();
}

void CpuProfilesCollection::UpdateNativeContextAddressForCurrentProfiles(
    Address from, Address to) {
  current_profiles_semaphore_.Wait();
  for (const std::unique_ptr<CpuProfile>& profile : current_profiles_) {
    profile->context_filter().OnMoveEvent(from, to);
  }
  current_profiles_semaphore_.Signal();
}

}  // namespace internal
}  // namespace v8