// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifdef V8_RUNTIME_CALL_STATS

#include "src/logging/runtime-call-stats.h"

#include <iomanip>

#include "src/tracing/tracing-category-observer.h"
#include "src/utils/ostreams.h"

namespace v8 {
namespace internal {

base::TimeTicks (*RuntimeCallTimer::Now)() = &base::TimeTicks::Now;

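// Reports per-thread CPU time instead of wall-clock time; the ThreadTicks
// value is reinterpreted as a TimeTicks so it can be installed in the
// RuntimeCallTimer::Now function pointer above.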
base::TimeTicks RuntimeCallTimer::NowCPUTime() {
  base::ThreadTicks ticks = base::ThreadTicks::Now();
  return base::TimeTicks::FromInternalValue(ticks.ToInternalValue());
}

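// Helper that collects all non-zero counters, sorts them in descending order
// of time (then count), and pretty-prints them as a table with per-entry
// percentages and an overall total.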
class RuntimeCallStatEntries {
 public:
  void Print(std::ostream& os) {
    if (total_call_count_ == 0) return;
    std::sort(entries_.rbegin(), entries_.rend());
    os << std::setw(50) << "Runtime Function/C++ Builtin" << std::setw(12)
       << "Time" << std::setw(18) << "Count" << std::endl
       << std::string(88, '=') << std::endl;
    for (Entry& entry : entries_) {
      entry.SetTotal(total_time_, total_call_count_);
      entry.Print(os);
    }
    os << std::string(88, '-') << std::endl;
    Entry("Total", total_time_, total_call_count_).Print(os);
  }

  // By default, the compiler will usually inline this, which results in a
  // large binary size increase: std::vector::push_back expands to a large
  // number of instructions, and this function is invoked repeatedly by macros.
  V8_NOINLINE void Add(RuntimeCallCounter* counter) {
    if (counter->count() == 0) return;
    entries_.push_back(
        Entry(counter->name(), counter->time(), counter->count()));
    total_time_ += counter->time();
    total_call_count_ += counter->count();
  }

 private:
  class Entry {
   public:
    Entry(const char* name, base::TimeDelta time, uint64_t count)
        : name_(name),
          time_(time.InMicroseconds()),
          count_(count),
          time_percent_(100),
          count_percent_(100) {}

    bool operator<(const Entry& other) const {
      if (time_ < other.time_) return true;
      if (time_ > other.time_) return false;
      return count_ < other.count_;
    }

    V8_NOINLINE void Print(std::ostream& os) {
      os.precision(2);
      os << std::fixed << std::setprecision(2);
      os << std::setw(50) << name_;
      os << std::setw(10) << static_cast<double>(time_) / 1000 << "ms ";
      os << std::setw(6) << time_percent_ << "%";
      os << std::setw(10) << count_ << " ";
      os << std::setw(6) << count_percent_ << "%";
      os << std::endl;
    }

    V8_NOINLINE void SetTotal(base::TimeDelta total_time,
                              uint64_t total_count) {
      if (total_time.InMicroseconds() == 0) {
        time_percent_ = 0;
      } else {
        time_percent_ = 100.0 * time_ / total_time.InMicroseconds();
      }
      count_percent_ = 100.0 * count_ / total_count;
    }

   private:
    const char* name_;
    int64_t time_;
    uint64_t count_;
    double time_percent_;
    double count_percent_;
  };

  uint64_t total_call_count_ = 0;
  base::TimeDelta total_time_;
  std::vector<Entry> entries_;
};

void RuntimeCallCounter::Reset() {
  count_ = 0;
  time_ = 0;
}

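// Serializes the counter as an array [count, time-in-microseconds] keyed by
// its name inside the given traced value.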
void RuntimeCallCounter::Dump(v8::tracing::TracedValue* value) {
  value->BeginArray(name_);
  value->AppendDouble(count_);
  value->AppendDouble(time_);
  value->EndArray();
}

void RuntimeCallCounter::Add(RuntimeCallCounter* other) {
  count_ += other->count();
  time_ += other->time().InMicroseconds();
}

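// Commits the elapsed time of this timer and of all its parents to their
// counters without removing any timer from the stack.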
void RuntimeCallTimer::Snapshot() {
  base::TimeTicks now = Now();
  // Pause only the topmost timer in the timer stack.
  Pause(now);
  // Commit all the timers' elapsed time to the counters.
  RuntimeCallTimer* timer = this;
  while (timer != nullptr) {
    timer->CommitTimeToCounter();
    timer = timer->parent();
  }
  Resume(now);
}

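// The constructor builds the flat table of counter names by expanding each
// counter list macro; GC counters get a "GC_" prefix and API counters an
// "API_" prefix.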
RuntimeCallStats::RuntimeCallStats(ThreadType thread_type)
    : in_use_(false), thread_type_(thread_type) {
  static const char* const kNames[] = {
#define CALL_BUILTIN_COUNTER(name) "GC_" #name,
      FOR_EACH_GC_COUNTER(CALL_BUILTIN_COUNTER)  //
#undef CALL_BUILTIN_COUNTER
#define CALL_RUNTIME_COUNTER(name) #name,
      FOR_EACH_MANUAL_COUNTER(CALL_RUNTIME_COUNTER)  //
#undef CALL_RUNTIME_COUNTER
#define CALL_RUNTIME_COUNTER(name, nargs, ressize) #name,
      FOR_EACH_INTRINSIC(CALL_RUNTIME_COUNTER)  //
#undef CALL_RUNTIME_COUNTER
#define CALL_BUILTIN_COUNTER(name) #name,
      BUILTIN_LIST_C(CALL_BUILTIN_COUNTER)  //
#undef CALL_BUILTIN_COUNTER
#define CALL_BUILTIN_COUNTER(name) "API_" #name,
      FOR_EACH_API_COUNTER(CALL_BUILTIN_COUNTER)  //
#undef CALL_BUILTIN_COUNTER
#define CALL_BUILTIN_COUNTER(name) #name,
      FOR_EACH_HANDLER_COUNTER(CALL_BUILTIN_COUNTER)  //
#undef CALL_BUILTIN_COUNTER
#define THREAD_SPECIFIC_COUNTER(name) #name,
      FOR_EACH_THREAD_SPECIFIC_COUNTER(THREAD_SPECIFIC_COUNTER)  //
#undef THREAD_SPECIFIC_COUNTER
  };
  for (int i = 0; i < kNumberOfCounters; i++) {
    this->counters_[i] = RuntimeCallCounter(kNames[i]);
  }
  if (FLAG_rcs_cpu_time) {
    CHECK(base::ThreadTicks::IsSupported());
    base::ThreadTicks::WaitUntilInitialized();
    RuntimeCallTimer::Now = &RuntimeCallTimer::NowCPUTime;
  }
}

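// Compute the id range of the thread-specific counter variants at compile
// time: FirstCounter picks the first enumerator from the expanded list, and
// the second expansion counts how many such counters there are.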
namespace {
constexpr RuntimeCallCounterId FirstCounter(RuntimeCallCounterId first, ...) {
  return first;
}

#define THREAD_SPECIFIC_COUNTER(name) k##name,
constexpr RuntimeCallCounterId kFirstThreadVariantCounter =
    FirstCounter(FOR_EACH_THREAD_SPECIFIC_COUNTER(THREAD_SPECIFIC_COUNTER) 0);
#undef THREAD_SPECIFIC_COUNTER

#define THREAD_SPECIFIC_COUNTER(name) +1
constexpr int kThreadVariantCounterCount =
    0 FOR_EACH_THREAD_SPECIFIC_COUNTER(THREAD_SPECIFIC_COUNTER);
#undef THREAD_SPECIFIC_COUNTER

constexpr auto kLastThreadVariantCounter = static_cast<RuntimeCallCounterId>(
    kFirstThreadVariantCounter + kThreadVariantCounterCount - 1);
}  // namespace

bool RuntimeCallStats::HasThreadSpecificCounterVariants(
    RuntimeCallCounterId id) {
  // Check that the id falls within the range of counters that have
  // thread-specific variants.
  return id >= kFirstThreadVariantCounter && id <= kLastThreadVariantCounter;
}

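// Background variants sit at odd offsets from kFirstThreadVariantCounter.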
bool RuntimeCallStats::IsBackgroundThreadSpecificVariant(
    RuntimeCallCounterId id) {
  return HasThreadSpecificCounterVariants(id) &&
         (id - kFirstThreadVariantCounter) % 2 == 1;
}

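// Pushes |timer| onto the timer stack and starts charging it to the counter
// identified by |counter_id|. Enter/Leave are normally driven by a RAII
// helper (e.g. RuntimeCallTimerScope) rather than called directly.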
void RuntimeCallStats::Enter(RuntimeCallTimer* timer,
                             RuntimeCallCounterId counter_id) {
  DCHECK(IsCalledOnTheSameThread());
  RuntimeCallCounter* counter = GetCounter(counter_id);
  DCHECK_NOT_NULL(counter->name());
  timer->Start(counter, current_timer());
  current_timer_.SetValue(timer);
  current_counter_.SetValue(counter);
}

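// Pops |timer| off the top of the timer stack and makes its parent the
// current timer again.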
void RuntimeCallStats::Leave(RuntimeCallTimer* timer) {
  DCHECK(IsCalledOnTheSameThread());
  RuntimeCallTimer* stack_top = current_timer();
  if (stack_top == nullptr) return;  // Missing timer is a result of Reset().
  CHECK(stack_top == timer);
  current_timer_.SetValue(timer->Stop());
  RuntimeCallTimer* cur_timer = current_timer();
  current_counter_.SetValue(cur_timer ? cur_timer->counter() : nullptr);
}

void RuntimeCallStats::Add(RuntimeCallStats* other) {
  for (int i = 0; i < kNumberOfCounters; i++) {
    GetCounter(i)->Add(other->GetCounter(i));
  }
}

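// Retroactively re-attributes the currently running timer to a different
// counter, resolving the thread-specific variant first when requested.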
void RuntimeCallStats::CorrectCurrentCounterId(RuntimeCallCounterId counter_id,
                                               CounterMode mode) {
  DCHECK(IsCalledOnTheSameThread());
  if (mode == RuntimeCallStats::CounterMode::kThreadSpecific) {
    counter_id = CounterIdForThread(counter_id);
  }
  DCHECK(IsCounterAppropriateForThread(counter_id));

  RuntimeCallTimer* timer = current_timer();
  if (timer == nullptr) return;
  RuntimeCallCounter* counter = GetCounter(counter_id);
  timer->set_counter(counter);
  current_counter_.SetValue(counter);
}

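// Lazily binds the table to the first thread that uses it and checks that all
// subsequent calls happen on that same thread.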
bool RuntimeCallStats::IsCalledOnTheSameThread() {
  if (thread_id_.IsValid()) return thread_id_ == ThreadId::Current();
  thread_id_ = ThreadId::Current();
  return true;
}

void RuntimeCallStats::Print() {
  StdoutStream os;
  Print(os);
}

void RuntimeCallStats::Print(std::ostream& os) {
  RuntimeCallStatEntries entries;
  if (current_timer_.Value() != nullptr) {
    current_timer_.Value()->Snapshot();
  }
  for (int i = 0; i < kNumberOfCounters; i++) {
    entries.Add(GetCounter(i));
  }
  entries.Print(os);
}

void RuntimeCallStats::Reset() {
  if (V8_LIKELY(!TracingFlags::is_runtime_stats_enabled())) return;

  // In tracing we only want to record the time spent on top-level trace
  // events. If the runtime counter stack is not empty, clear the whole stack
  // and then reset the counters so that they can be dumped into top-level
  // trace events accurately.
  while (current_timer_.Value()) {
    current_timer_.SetValue(current_timer_.Value()->Stop());
  }

  for (int i = 0; i < kNumberOfCounters; i++) {
    GetCounter(i)->Reset();
  }

  in_use_ = true;
}

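// Dumps every counter with a non-zero count into |value| and marks the table
// as no longer in use.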
void RuntimeCallStats::Dump(v8::tracing::TracedValue* value) {
  for (int i = 0; i < kNumberOfCounters; i++) {
    if (GetCounter(i)->count() > 0) GetCounter(i)->Dump(value);
  }
  in_use_ = false;
}

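// Owns the per-worker-thread stats tables of an isolate; it records the id of
// the isolate's main thread so that worker tables can later be folded back
// into the main table.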
WorkerThreadRuntimeCallStats::WorkerThreadRuntimeCallStats()
    : isolate_thread_id_(ThreadId::Current()) {}

WorkerThreadRuntimeCallStats::~WorkerThreadRuntimeCallStats() {
  if (tls_key_) base::Thread::DeleteThreadLocalKey(*tls_key_);
}

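// Lazily creates the TLS key under which each worker thread stores a pointer
// to its own RuntimeCallStats table.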
base::Thread::LocalStorageKey WorkerThreadRuntimeCallStats::GetKey() {
  base::MutexGuard lock(&mutex_);
  DCHECK(TracingFlags::is_runtime_stats_enabled());
  if (!tls_key_) tls_key_ = base::Thread::CreateThreadLocalKey();
  return *tls_key_;
}

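// Creates and registers a fresh worker-thread table; ownership stays with
// this object so the table can later be merged via AddToMainTable().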
RuntimeCallStats* WorkerThreadRuntimeCallStats::NewTable() {
  DCHECK(TracingFlags::is_runtime_stats_enabled());
  // Never create a new worker table on the isolate's main thread.
  DCHECK_NE(ThreadId::Current(), isolate_thread_id_);
  std::unique_ptr<RuntimeCallStats> new_table =
      std::make_unique<RuntimeCallStats>(RuntimeCallStats::kWorkerThread);
  RuntimeCallStats* result = new_table.get();

  base::MutexGuard lock(&mutex_);
  tables_.push_back(std::move(new_table));
  return result;
}

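// Folds every worker table into the isolate's main table and resets the
// worker tables afterwards.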
void WorkerThreadRuntimeCallStats::AddToMainTable(
    RuntimeCallStats* main_call_stats) {
  base::MutexGuard lock(&mutex_);
  for (auto& worker_stats : tables_) {
    DCHECK_NE(main_call_stats, worker_stats.get());
    main_call_stats->Add(worker_stats.get());
    worker_stats->Reset();
  }
}

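// Binds the current worker thread to its thread-local stats table for the
// duration of the scope, creating the table on first use; the destructor
// below dumps the collected counters into a trace event when runtime stats
// were enabled by tracing. A rough usage sketch (names outside this file,
// such as the Counters accessor and the counter id, are illustrative only):
//
//   WorkerThreadRuntimeCallStatsScope rcs_scope(
//       isolate->counters()->worker_thread_runtime_call_stats());
//   RuntimeCallTimerScope timer_scope(rcs_scope.Get(),
//                                     RuntimeCallCounterId::kSomeCounter);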
WorkerThreadRuntimeCallStatsScope::WorkerThreadRuntimeCallStatsScope(
    WorkerThreadRuntimeCallStats* worker_stats) {
  if (V8_LIKELY(!TracingFlags::is_runtime_stats_enabled())) return;

  table_ = reinterpret_cast<RuntimeCallStats*>(
      base::Thread::GetThreadLocal(worker_stats->GetKey()));
  if (table_ == nullptr) {
    table_ = worker_stats->NewTable();
    base::Thread::SetThreadLocal(worker_stats->GetKey(), table_);
  }

  if ((TracingFlags::runtime_stats.load(std::memory_order_relaxed) &
       v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING)) {
    table_->Reset();
  }
}

WorkerThreadRuntimeCallStatsScope::~WorkerThreadRuntimeCallStatsScope() {
  if (V8_LIKELY(table_ == nullptr)) return;

  if ((TracingFlags::runtime_stats.load(std::memory_order_relaxed) &
       v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING)) {
    auto value = v8::tracing::TracedValue::Create();
    table_->Dump(value.get());
    TRACE_EVENT_INSTANT1(TRACE_DISABLED_BY_DEFAULT("v8.runtime_stats"),
                         "V8.RuntimeStats", TRACE_EVENT_SCOPE_THREAD,
                         "runtime-call-stats", std::move(value));
  }
}

}  // namespace internal
}  // namespace v8

#endif  // V8_RUNTIME_CALL_STATS