// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_PROFILER_CPU_PROFILER_H_
#define V8_PROFILER_CPU_PROFILER_H_

#include <memory>

#include "src/allocation.h"
#include "src/base/atomic-utils.h"
#include "src/base/atomicops.h"
#include "src/base/platform/time.h"
#include "src/compiler.h"
#include "src/isolate.h"
#include "src/libsampler/v8-sampler.h"
#include "src/locked-queue.h"
#include "src/profiler/circular-queue.h"
#include "src/profiler/profiler-listener.h"
#include "src/profiler/tick-sample.h"

namespace v8 {
namespace internal {

// Forward declarations.
class CodeEntry;
class CodeMap;
class CpuProfile;
class CpuProfilesCollection;
class ProfileGenerator;

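// Pairs each code event type enumerator with the record class that carries
// its payload. Clients expand the list with a macro V(type, class).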
#define CODE_EVENTS_TYPE_LIST(V)                         \
  V(CODE_CREATION, CodeCreateEventRecord)                \
  V(CODE_MOVE, CodeMoveEventRecord)                      \
  V(CODE_DISABLE_OPT, CodeDisableOptEventRecord)         \
  V(CODE_DEOPT, CodeDeoptEventRecord)                    \
  V(REPORT_BUILTIN, ReportBuiltinEventRecord)


class CodeEventRecord {
 public:
#define DECLARE_TYPE(type, ignore) type,
  enum Type {
    NONE = 0,
    CODE_EVENTS_TYPE_LIST(DECLARE_TYPE)
    NUMBER_OF_TYPES
  };
#undef DECLARE_TYPE

  Type type;
  mutable unsigned order;
};

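// Each record below carries the payload of one code event. Records are
// produced by the VM and handed to the processor thread, which applies them
// to the profiler's CodeMap via UpdateCodeMap().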
class CodeCreateEventRecord : public CodeEventRecord {
 public:
  Address start;
  CodeEntry* entry;
  unsigned size;

  INLINE(void UpdateCodeMap(CodeMap* code_map));
};


class CodeMoveEventRecord : public CodeEventRecord {
 public:
  Address from;
  Address to;

  INLINE(void UpdateCodeMap(CodeMap* code_map));
};


class CodeDisableOptEventRecord : public CodeEventRecord {
 public:
  Address start;
  const char* bailout_reason;

  INLINE(void UpdateCodeMap(CodeMap* code_map));
};


class CodeDeoptEventRecord : public CodeEventRecord {
 public:
  Address start;
  const char* deopt_reason;
  SourcePosition position;
  int deopt_id;
  void* pc;
  int fp_to_sp_delta;

  INLINE(void UpdateCodeMap(CodeMap* code_map));
};


class ReportBuiltinEventRecord : public CodeEventRecord {
 public:
  Address start;
  Builtins::Name builtin_id;

  INLINE(void UpdateCodeMap(CodeMap* code_map));
};


class TickSampleEventRecord {
 public:
  // The parameterless constructor is used when we dequeue data from
  // the ticks buffer.
  TickSampleEventRecord() { }
  explicit TickSampleEventRecord(unsigned order) : order(order) { }

  unsigned order;
  TickSample sample;
};


class CodeEventsContainer {
 public:
  explicit CodeEventsContainer(
      CodeEventRecord::Type type = CodeEventRecord::NONE) {
    generic.type = type;
  }
  union {
    CodeEventRecord generic;
#define DECLARE_CLASS(ignore, type) type type##_;
    CODE_EVENTS_TYPE_LIST(DECLARE_CLASS)
#undef DECLARE_CLASS
  };
};
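// Example (sketch): a producer fills the union member that matches the event
// type and hands the container to the processor, e.g.
//   CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
//   CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
//   rec->start = ...; rec->entry = ...; rec->size = ...;
//   processor->Enqueue(evt_rec);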


// This class implements both the profile events processor thread and
// methods called by event producers: VM and stack sampler threads.
class ProfilerEventsProcessor : public base::Thread {
 public:
  ProfilerEventsProcessor(ProfileGenerator* generator,
                          sampler::Sampler* sampler,
                          base::TimeDelta period);
  virtual ~ProfilerEventsProcessor();

  // Thread control.
  virtual void Run();
  void StopSynchronously();
  INLINE(bool running()) { return !!base::NoBarrier_Load(&running_); }
  void Enqueue(const CodeEventsContainer& event);

  // Puts current stack into tick sample events buffer.
  void AddCurrentStack(Isolate* isolate, bool update_stats = false);
  void AddDeoptStack(Isolate* isolate, Address from, int fp_to_sp_delta);

  // Tick sample events are filled directly in the buffer of the circular
  // queue (because the structure is of fixed width, but usually not all
  // stack frame entries are filled). StartTickSample() returns a pointer to
  // the next record in the buffer; FinishTickSample() commits that record.
  inline TickSample* StartTickSample();
  inline void FinishTickSample();
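  // Example (sketch): a sampler thread or signal handler would typically do
  //   TickSample* sample = processor->StartTickSample();
  //   if (sample != nullptr) {
  //     // ... fill *sample from the interrupted thread's register state ...
  //     processor->FinishTickSample();
  //   }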

  // SamplingCircularQueue has stricter alignment requirements than a normal
  // new can fulfil, so we need to provide our own new/delete here.
  void* operator new(size_t size);
  void operator delete(void* ptr);

 private:
  // Called from the events processing thread (Run() method).
  bool ProcessCodeEvent();

  enum SampleProcessingResult {
    OneSampleProcessed,
    FoundSampleForNextCodeEvent,
    NoSamplesInQueue
  };
  SampleProcessingResult ProcessOneSample();

  ProfileGenerator* generator_;
  sampler::Sampler* sampler_;
  base::Atomic32 running_;
  const base::TimeDelta period_;  // Samples & code events processing period.
  LockedQueue<CodeEventsContainer> events_buffer_;
  static const size_t kTickSampleBufferSize = 1 * MB;
  static const size_t kTickSampleQueueLength =
      kTickSampleBufferSize / sizeof(TickSampleEventRecord);
  SamplingCircularQueue<TickSampleEventRecord,
                        kTickSampleQueueLength> ticks_buffer_;
  LockedQueue<TickSampleEventRecord> ticks_from_vm_buffer_;
  base::AtomicNumber<unsigned> last_code_event_id_;
  unsigned last_processed_code_event_id_;
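  // Each enqueued code event is stamped with an increasing id taken from
  // last_code_event_id_; a sample carries the id that was current when it
  // was captured, which lets the processor apply pending code events before
  // it records the sample (see ProcessOneSample()).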
};

class CpuProfiler : public CodeEventObserver {
 public:
  explicit CpuProfiler(Isolate* isolate);

  CpuProfiler(Isolate* isolate, CpuProfilesCollection* profiles,
              ProfileGenerator* test_generator,
              ProfilerEventsProcessor* test_processor);

  ~CpuProfiler() override;

  void set_sampling_interval(base::TimeDelta value);
  void CollectSample();
  void StartProfiling(const char* title, bool record_samples = false);
  void StartProfiling(String* title, bool record_samples);
  CpuProfile* StopProfiling(const char* title);
  CpuProfile* StopProfiling(String* title);
  int GetProfilesCount();
  CpuProfile* GetProfile(int index);
  void DeleteAllProfiles();
  void DeleteProfile(CpuProfile* profile);
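  // Example (sketch): a typical profiling session driven from the VM thread.
  //   CpuProfiler profiler(isolate);
  //   profiler.StartProfiling("session", true);  // true: record samples
  //   ... run the code to be profiled ...
  //   CpuProfile* profile = profiler.StopProfiling("session");
  //   ... inspect or serialize the profile, then ...
  //   profiler.DeleteProfile(profile);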

  void CodeEventHandler(const CodeEventsContainer& evt_rec) override;

  // Invoked from the stack sampler (thread or signal handler).
  inline TickSample* StartTickSample();
  inline void FinishTickSample();

  bool is_profiling() const { return is_profiling_; }

  ProfileGenerator* generator() const { return generator_.get(); }
  ProfilerEventsProcessor* processor() const { return processor_.get(); }
  Isolate* isolate() const { return isolate_; }

 private:
  void StartProcessorIfNotStarted();
  void StopProcessorIfLastProfile(const char* title);
  void StopProcessor();
  void ResetProfiles();
  void LogBuiltins();

  Isolate* const isolate_;
  base::TimeDelta sampling_interval_;
  std::unique_ptr<CpuProfilesCollection> profiles_;
  std::unique_ptr<ProfileGenerator> generator_;
  std::unique_ptr<ProfilerEventsProcessor> processor_;
  bool saved_is_logging_;
  bool is_profiling_;

  DISALLOW_COPY_AND_ASSIGN(CpuProfiler);
};

}  // namespace internal
}  // namespace v8


#endif  // V8_PROFILER_CPU_PROFILER_H_