• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright 2010 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 //     * Redistributions of source code must retain the above copyright
7 //       notice, this list of conditions and the following disclaimer.
8 //     * Redistributions in binary form must reproduce the above
9 //       copyright notice, this list of conditions and the following
10 //       disclaimer in the documentation and/or other materials provided
11 //       with the distribution.
12 //     * Neither the name of Google Inc. nor the names of its
13 //       contributors may be used to endorse or promote products derived
14 //       from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #ifndef V8_CPU_PROFILER_H_
29 #define V8_CPU_PROFILER_H_
30 
31 #ifdef ENABLE_LOGGING_AND_PROFILING
32 
33 #include "atomicops.h"
34 #include "circular-queue.h"
35 #include "unbound-queue.h"
36 
37 namespace v8 {
38 namespace internal {
39 
40 // Forward declarations.
41 class CodeEntry;
42 class CodeMap;
43 class CpuProfile;
44 class CpuProfilesCollection;
45 class HashMap;
46 class ProfileGenerator;
47 class TokenEnumerator;
48 
// X-macro listing every code event kind together with its record class.
// Expanded twice below: to generate CodeEventRecord::Type enumerators and
// to generate the members of ProfilerEventsProcessor::CodeEventsContainer.
#define CODE_EVENTS_TYPE_LIST(V)                                   \
  V(CODE_CREATION,    CodeCreateEventRecord)                       \
  V(CODE_MOVE,        CodeMoveEventRecord)                         \
  V(CODE_DELETE,      CodeDeleteEventRecord)                       \
  V(SHARED_FUNC_MOVE, SharedFunctionInfoMoveEventRecord)
54 
55 
// Base class of all code event records that VM threads enqueue for the
// profiler events processor thread.  Instances are queued by value inside
// a union (see ProfilerEventsProcessor::CodeEventsContainer), so the
// field layout must stay stable.
class CodeEventRecord {
 public:
#define DECLARE_TYPE(type, ignore) type,
  // One enumerator per CODE_EVENTS_TYPE_LIST entry, bracketed by NONE
  // and NUMBER_OF_TYPES.
  enum Type {
    NONE = 0,
    CODE_EVENTS_TYPE_LIST(DECLARE_TYPE)
    NUMBER_OF_TYPES
  };
#undef DECLARE_TYPE

  Type type;
  // Enqueue sequence number; used to interleave code events with tick
  // samples.  May wrap (see TickSampleEventRecord's filler comment).
  unsigned order;
};
69 
70 
// Records the creation of a code object.
class CodeCreateEventRecord : public CodeEventRecord {
 public:
  Address start;     // Start address of the new code object.
  CodeEntry* entry;  // Profiler entry describing the code.
  unsigned size;     // Code size in bytes.
  Address shared;    // Address of the associated shared function info.

  // Applies this event to |code_map| on the processor thread.
  INLINE(void UpdateCodeMap(CodeMap* code_map));
};
80 
81 
// Records a code object being moved (e.g. by the garbage collector).
class CodeMoveEventRecord : public CodeEventRecord {
 public:
  Address from;  // Old start address of the code object.
  Address to;    // New start address of the code object.

  // Applies this event to |code_map| on the processor thread.
  INLINE(void UpdateCodeMap(CodeMap* code_map));
};
89 
90 
// Records the deletion of a code object.
class CodeDeleteEventRecord : public CodeEventRecord {
 public:
  Address start;  // Start address of the deleted code object.

  // Applies this event to |code_map| on the processor thread.
  INLINE(void UpdateCodeMap(CodeMap* code_map));
};
97 
98 
// Records a SharedFunctionInfo object being moved.
class SharedFunctionInfoMoveEventRecord : public CodeEventRecord {
 public:
  Address from;  // Old address of the SharedFunctionInfo.
  Address to;    // New address of the SharedFunctionInfo.

  // Applies this event to |code_map| on the processor thread.
  INLINE(void UpdateCodeMap(CodeMap* code_map));
};
106 
107 
108 class TickSampleEventRecord BASE_EMBEDDED {
109  public:
TickSampleEventRecord()110   TickSampleEventRecord()
111       : filler(1) {
112     ASSERT(filler != SamplingCircularQueue::kClear);
113   }
114 
115   // The first machine word of a TickSampleEventRecord must not ever
116   // become equal to SamplingCircularQueue::kClear.  As both order and
117   // TickSample's first field are not reliable in this sense (order
118   // can overflow, TickSample can have all fields reset), we are
119   // forced to use an artificial filler field.
120   int filler;
121   unsigned order;
122   TickSample sample;
123 
cast(void * value)124   static TickSampleEventRecord* cast(void* value) {
125     return reinterpret_cast<TickSampleEventRecord*>(value);
126   }
127 
128   INLINE(static TickSampleEventRecord* init(void* value));
129 };
130 
131 
132 // This class implements both the profile events processor thread and
133 // methods called by event producers: VM and stack sampler threads.
134 class ProfilerEventsProcessor : public Thread {
135  public:
136   explicit ProfilerEventsProcessor(Isolate* isolate,
137                                    ProfileGenerator* generator);
~ProfilerEventsProcessor()138   virtual ~ProfilerEventsProcessor() {}
139 
140   // Thread control.
141   virtual void Run();
Stop()142   inline void Stop() { running_ = false; }
INLINE(bool running ())143   INLINE(bool running()) { return running_; }
144 
145   // Events adding methods. Called by VM threads.
146   void CallbackCreateEvent(Logger::LogEventsAndTags tag,
147                            const char* prefix, String* name,
148                            Address start);
149   void CodeCreateEvent(Logger::LogEventsAndTags tag,
150                        String* name,
151                        String* resource_name, int line_number,
152                        Address start, unsigned size,
153                        Address shared);
154   void CodeCreateEvent(Logger::LogEventsAndTags tag,
155                        const char* name,
156                        Address start, unsigned size);
157   void CodeCreateEvent(Logger::LogEventsAndTags tag,
158                        int args_count,
159                        Address start, unsigned size);
160   void CodeMoveEvent(Address from, Address to);
161   void CodeDeleteEvent(Address from);
162   void SharedFunctionInfoMoveEvent(Address from, Address to);
163   void RegExpCodeCreateEvent(Logger::LogEventsAndTags tag,
164                              const char* prefix, String* name,
165                              Address start, unsigned size);
166   // Puts current stack into tick sample events buffer.
167   void AddCurrentStack();
168 
169   // Tick sample events are filled directly in the buffer of the circular
170   // queue (because the structure is of fixed width, but usually not all
171   // stack frame entries are filled.) This method returns a pointer to the
172   // next record of the buffer.
173   INLINE(TickSample* TickSampleEvent());
174 
175  private:
176   union CodeEventsContainer {
177     CodeEventRecord generic;
178 #define DECLARE_CLASS(ignore, type) type type##_;
179     CODE_EVENTS_TYPE_LIST(DECLARE_CLASS)
180 #undef DECLARE_TYPE
181   };
182 
183   // Called from events processing thread (Run() method.)
184   bool ProcessCodeEvent(unsigned* dequeue_order);
185   bool ProcessTicks(unsigned dequeue_order);
186 
187   INLINE(static bool FilterOutCodeCreateEvent(Logger::LogEventsAndTags tag));
188 
189   ProfileGenerator* generator_;
190   bool running_;
191   UnboundQueue<CodeEventsContainer> events_buffer_;
192   SamplingCircularQueue ticks_buffer_;
193   UnboundQueue<TickSampleEventRecord> ticks_from_vm_buffer_;
194   unsigned enqueue_order_;
195 };
196 
197 } }  // namespace v8::internal
198 
199 
// Dispatches |Call| to the logger and, when profiling is active, also to
// the CpuProfiler.  The is_profiling() guard is required: CpuProfiler
// event methods must only be entered while profiling is enabled (see the
// comment on CpuProfiler::CallbackEvent et al.).
#define PROFILE(isolate, Call)                         \
  LOG(isolate, Call);                                  \
  do {                                                 \
    if (v8::internal::CpuProfiler::is_profiling()) {   \
      v8::internal::CpuProfiler::Call;                 \
    }                                                  \
  } while (false)
#else
// Without ENABLE_LOGGING_AND_PROFILING, PROFILE reduces to plain logging.
#define PROFILE(isolate, Call) LOG(isolate, Call)
#endif  // ENABLE_LOGGING_AND_PROFILING
210 
211 
212 namespace v8 {
213 namespace internal {
214 
215 
216 // TODO(isolates): isolatify this class.
217 class CpuProfiler {
218  public:
219   static void Setup();
220   static void TearDown();
221 
222 #ifdef ENABLE_LOGGING_AND_PROFILING
223   static void StartProfiling(const char* title);
224   static void StartProfiling(String* title);
225   static CpuProfile* StopProfiling(const char* title);
226   static CpuProfile* StopProfiling(Object* security_token, String* title);
227   static int GetProfilesCount();
228   static CpuProfile* GetProfile(Object* security_token, int index);
229   static CpuProfile* FindProfile(Object* security_token, unsigned uid);
230   static void DeleteAllProfiles();
231   static void DeleteProfile(CpuProfile* profile);
232   static bool HasDetachedProfiles();
233 
234   // Invoked from stack sampler (thread or signal handler.)
235   static TickSample* TickSampleEvent(Isolate* isolate);
236 
237   // Must be called via PROFILE macro, otherwise will crash when
238   // profiling is not enabled.
239   static void CallbackEvent(String* name, Address entry_point);
240   static void CodeCreateEvent(Logger::LogEventsAndTags tag,
241                               Code* code, const char* comment);
242   static void CodeCreateEvent(Logger::LogEventsAndTags tag,
243                               Code* code, String* name);
244   static void CodeCreateEvent(Logger::LogEventsAndTags tag,
245                               Code* code,
246                               SharedFunctionInfo *shared,
247                               String* name);
248   static void CodeCreateEvent(Logger::LogEventsAndTags tag,
249                               Code* code,
250                               SharedFunctionInfo *shared,
251                               String* source, int line);
252   static void CodeCreateEvent(Logger::LogEventsAndTags tag,
253                               Code* code, int args_count);
CodeMovingGCEvent()254   static void CodeMovingGCEvent() {}
255   static void CodeMoveEvent(Address from, Address to);
256   static void CodeDeleteEvent(Address from);
257   static void GetterCallbackEvent(String* name, Address entry_point);
258   static void RegExpCodeCreateEvent(Code* code, String* source);
259   static void SetterCallbackEvent(String* name, Address entry_point);
260   static void SharedFunctionInfoMoveEvent(Address from, Address to);
261 
262   // TODO(isolates): this doesn't have to use atomics anymore.
263 
INLINE(bool is_profiling ())264   static INLINE(bool is_profiling()) {
265     return is_profiling(Isolate::Current());
266   }
267 
INLINE(bool is_profiling (Isolate * isolate))268   static INLINE(bool is_profiling(Isolate* isolate)) {
269     CpuProfiler* profiler = isolate->cpu_profiler();
270     return profiler != NULL && NoBarrier_Load(&profiler->is_profiling_);
271   }
272 
273  private:
274   CpuProfiler();
275   ~CpuProfiler();
276   void StartCollectingProfile(const char* title);
277   void StartCollectingProfile(String* title);
278   void StartProcessorIfNotStarted();
279   CpuProfile* StopCollectingProfile(const char* title);
280   CpuProfile* StopCollectingProfile(Object* security_token, String* title);
281   void StopProcessorIfLastProfile(const char* title);
282   void StopProcessor();
283   void ResetProfiles();
284 
285   CpuProfilesCollection* profiles_;
286   unsigned next_profile_uid_;
287   TokenEnumerator* token_enumerator_;
288   ProfileGenerator* generator_;
289   ProfilerEventsProcessor* processor_;
290   int saved_logging_nesting_;
291   bool need_to_stop_sampler_;
292   Atomic32 is_profiling_;
293 
294 #else
INLINE(bool is_profiling ())295   static INLINE(bool is_profiling()) { return false; }
296 #endif  // ENABLE_LOGGING_AND_PROFILING
297 
298  private:
299   DISALLOW_COPY_AND_ASSIGN(CpuProfiler);
300 };
301 
302 } }  // namespace v8::internal
303 
304 
305 #endif  // V8_CPU_PROFILER_H_
306