// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_PROFILER_PROFILE_GENERATOR_H_
#define V8_PROFILER_PROFILE_GENERATOR_H_

#include <atomic>
#include <deque>
#include <limits>
#include <map>
#include <memory>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>

#include "include/v8-profiler.h"
#include "src/base/platform/time.h"
#include "src/builtins/builtins.h"
#include "src/execution/vm-state.h"
#include "src/logging/code-events.h"
#include "src/profiler/strings-storage.h"
#include "src/utils/allocation.h"

namespace v8 {
namespace internal {

struct TickSample;

// Provides a mapping from the offsets within generated code or a bytecode array
// to the source line and inlining id.
class V8_EXPORT_PRIVATE SourcePositionTable : public Malloced {
 public:
  SourcePositionTable() = default;
  SourcePositionTable(const SourcePositionTable&) = delete;
  SourcePositionTable& operator=(const SourcePositionTable&) = delete;

  void SetPosition(int pc_offset, int line, int inlining_id);
  int GetSourceLineNumber(int pc_offset) const;
  int GetInliningId(int pc_offset) const;

  size_t Size() const;
  void print() const;

 private:
  struct SourcePositionTuple {
    bool operator<(const SourcePositionTuple& other) const {
      return pc_offset < other.pc_offset;
    }
    int pc_offset;
    int line_number;
    int inlining_id;
  };
  // This is logically a map, but we store it as a vector of tuples, sorted by
  // the pc offset, so that we can save space and look up items using binary
  // search.
  std::vector<SourcePositionTuple> pc_offsets_to_lines_;
};
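
// Illustrative sketch (not from this header) of the lookup that the sorted
// vector above enables: binary-search for the last tuple whose pc_offset is
// at or before the query offset. GetLine() is a hypothetical free function
// mirroring the contract of SourcePositionTable::GetSourceLineNumber().
//
//   int GetLine(const std::vector<SourcePositionTuple>& table, int pc_offset) {
//     auto it = std::upper_bound(table.begin(), table.end(),
//                                SourcePositionTuple{pc_offset, 0, 0});
//     if (it == table.begin()) return v8::CpuProfileNode::kNoLineNumberInfo;
//     return std::prev(it)->line_number;  // nearest position at or before pc
//   }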

struct CodeEntryAndLineNumber;

class CodeEntry {
 public:
  enum class CodeType { JS, WASM, OTHER };

  // CodeEntry may reference strings (|name|, |resource_name|) managed by a
  // StringsStorage instance. These must be freed via ReleaseStrings.
  inline CodeEntry(CodeEventListener::LogEventsAndTags tag, const char* name,
                   const char* resource_name = CodeEntry::kEmptyResourceName,
                   int line_number = v8::CpuProfileNode::kNoLineNumberInfo,
                   int column_number = v8::CpuProfileNode::kNoColumnNumberInfo,
                   std::unique_ptr<SourcePositionTable> line_info = nullptr,
                   bool is_shared_cross_origin = false,
                   CodeType code_type = CodeType::JS);
  CodeEntry(const CodeEntry&) = delete;
  CodeEntry& operator=(const CodeEntry&) = delete;
  ~CodeEntry() {
    // No alive handles should be associated with the CodeEntry at time of
    // destruction.
    DCHECK(!heap_object_location_);
    DCHECK_EQ(ref_count_, 0UL);
  }

  const char* name() const { return name_; }
  const char* resource_name() const { return resource_name_; }
  int line_number() const { return line_number_; }
  int column_number() const { return column_number_; }
  const SourcePositionTable* line_info() const { return line_info_.get(); }
  int script_id() const { return script_id_; }
  void set_script_id(int script_id) { script_id_ = script_id; }
  int position() const { return position_; }
  void set_position(int position) { position_ = position; }
  void set_bailout_reason(const char* bailout_reason) {
    EnsureRareData()->bailout_reason_ = bailout_reason;
  }
  const char* bailout_reason() const {
    return rare_data_ ? rare_data_->bailout_reason_ : kEmptyBailoutReason;
  }

  void set_deopt_info(const char* deopt_reason, int deopt_id,
                      std::vector<CpuProfileDeoptFrame> inlined_frames);

  size_t EstimatedSize() const;
  CpuProfileDeoptInfo GetDeoptInfo();
  bool has_deopt_info() const {
    return rare_data_ && rare_data_->deopt_id_ != kNoDeoptimizationId;
  }
  void clear_deopt_info() {
    if (!rare_data_) return;
    // TODO(alph): Clear rare_data_ if that was the only field in use.
    rare_data_->deopt_reason_ = kNoDeoptReason;
    rare_data_->deopt_id_ = kNoDeoptimizationId;
  }

  const char* code_type_string() const {
    switch (CodeTypeField::decode(bit_field_)) {
      case CodeType::JS:
        return "JS";
      case CodeType::WASM:
        return "wasm";
      case CodeType::OTHER:
        return "other";
    }
  }

  // Returns the start address of the instruction segment represented by this
  // CodeEntry. Used as a key in the containing CodeMap.
  Address instruction_start() const { return instruction_start_; }
  void set_instruction_start(Address address) { instruction_start_ = address; }

  Address** heap_object_location_address() { return &heap_object_location_; }

  void FillFunctionInfo(SharedFunctionInfo shared);

  void SetBuiltinId(Builtin id);
  Builtin builtin() const { return BuiltinField::decode(bit_field_); }

  bool is_shared_cross_origin() const {
    return SharedCrossOriginField::decode(bit_field_);
  }

  // Returns whether or not the lifetime of this CodeEntry is reference
  // counted, and managed by a CodeMap.
  bool is_ref_counted() const { return RefCountedField::decode(bit_field_); }

  uint32_t GetHash() const;
  bool IsSameFunctionAs(const CodeEntry* entry) const;

  int GetSourceLine(int pc_offset) const;

  struct Equals {
    bool operator()(const CodeEntry* lhs, const CodeEntry* rhs) const {
      return lhs->IsSameFunctionAs(rhs);
    }
  };
  struct Hasher {
    std::size_t operator()(CodeEntry* e) const { return e->GetHash(); }
  };

  void SetInlineStacks(
      std::unordered_set<CodeEntry*, Hasher, Equals> inline_entries,
      std::unordered_map<int, std::vector<CodeEntryAndLineNumber>>
          inline_stacks);
  const std::vector<CodeEntryAndLineNumber>* GetInlineStack(
      int pc_offset) const;

  CodeEventListener::LogEventsAndTags tag() const {
    return TagField::decode(bit_field_);
  }

  V8_EXPORT_PRIVATE static const char* const kEmptyResourceName;
  static const char* const kEmptyBailoutReason;
  static const char* const kNoDeoptReason;

  V8_EXPORT_PRIVATE static const char* const kProgramEntryName;
  V8_EXPORT_PRIVATE static const char* const kIdleEntryName;
  V8_EXPORT_PRIVATE static const char* const kGarbageCollectorEntryName;
  // Used to represent frames for which we have no reliable way to
  // detect the function.
  V8_EXPORT_PRIVATE static const char* const kUnresolvedFunctionName;
  V8_EXPORT_PRIVATE static const char* const kRootEntryName;

  V8_EXPORT_PRIVATE static CodeEntry* program_entry();
  V8_EXPORT_PRIVATE static CodeEntry* idle_entry();
  V8_EXPORT_PRIVATE static CodeEntry* gc_entry();
  V8_EXPORT_PRIVATE static CodeEntry* unresolved_entry();
  V8_EXPORT_PRIVATE static CodeEntry* root_entry();

  // Releases strings owned by this CodeEntry, which may be allocated in the
  // provided StringsStorage instance. This instance is not stored directly
  // with the CodeEntry in order to reduce memory footprint.
  // Called before every destruction.
  void ReleaseStrings(StringsStorage& strings);

  void print() const;

 private:
  friend class CodeEntryStorage;

  struct RareData {
    const char* deopt_reason_ = kNoDeoptReason;
    const char* bailout_reason_ = kEmptyBailoutReason;
    int deopt_id_ = kNoDeoptimizationId;
    std::unordered_map<int, std::vector<CodeEntryAndLineNumber>> inline_stacks_;
    std::unordered_set<CodeEntry*, Hasher, Equals> inline_entries_;
    std::vector<CpuProfileDeoptFrame> deopt_inlined_frames_;
  };

  RareData* EnsureRareData();

  void mark_ref_counted() {
    bit_field_ = RefCountedField::update(bit_field_, true);
    ref_count_ = 1;
  }

  size_t AddRef() {
    DCHECK(is_ref_counted());
    DCHECK_LT(ref_count_, std::numeric_limits<size_t>::max());
    ref_count_++;
    return ref_count_;
  }

  size_t DecRef() {
    DCHECK(is_ref_counted());
    DCHECK_GT(ref_count_, 0UL);
    ref_count_--;
    return ref_count_;
  }

  using TagField = base::BitField<CodeEventListener::LogEventsAndTags, 0, 8>;
  using BuiltinField = base::BitField<Builtin, 8, 20>;
  static_assert(Builtins::kBuiltinCount <= BuiltinField::kNumValues,
                "builtin_count exceeds size of bitfield");
  using RefCountedField = base::BitField<bool, 28, 1>;
  using CodeTypeField = base::BitField<CodeType, 29, 2>;
  using SharedCrossOriginField = base::BitField<bool, 31, 1>;
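
  // Illustrative layout of the 32-bit bit_field_ implied by the BitField
  // definitions above (bit 0 is the least significant):
  //
  //   bits 0..7    TagField                (LogEventsAndTags)
  //   bits 8..27   BuiltinField            (Builtin id)
  //   bit  28      RefCountedField
  //   bits 29..30  CodeTypeField           (JS / WASM / OTHER)
  //   bit  31      SharedCrossOriginField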

  std::uint32_t bit_field_;
  std::atomic<std::size_t> ref_count_ = {0};
  const char* name_;
  const char* resource_name_;
  int line_number_;
  int column_number_;
  int script_id_;
  int position_;
  std::unique_ptr<SourcePositionTable> line_info_;
  std::unique_ptr<RareData> rare_data_;
  Address instruction_start_ = kNullAddress;
  Address* heap_object_location_ = nullptr;
};

struct CodeEntryAndLineNumber {
  CodeEntry* code_entry;
  int line_number;
};

using ProfileStackTrace = std::vector<CodeEntryAndLineNumber>;

// Filters stack frames from sources other than a target native context.
class ContextFilter {
 public:
  explicit ContextFilter(Address native_context_address = kNullAddress)
      : native_context_address_(native_context_address) {}

  // Invoked when a native context has changed address.
  void OnMoveEvent(Address from_address, Address to_address);

  bool Accept(Address native_context_address) const {
    if (native_context_address_ == kNullAddress) return true;
    return (native_context_address & ~kHeapObjectTag) ==
           native_context_address_;
  }

  // Update the context's tracked address based on VM-thread events.
  void set_native_context_address(Address address) {
    native_context_address_ = address;
  }
  Address native_context_address() const { return native_context_address_; }

 private:
  Address native_context_address_;
};
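
// Example of the tag masking in ContextFilter::Accept() (illustrative
// addresses; kHeapObjectTag is the low-bit tag on V8 heap pointers):
//
//   ContextFilter filter(0x2a30);    // untagged native context address
//   filter.Accept(0x2a31);           // true: 0x2a31 & ~kHeapObjectTag == 0x2a30
//   filter.Accept(0x4b51);           // false: a different context
//   ContextFilter().Accept(0x4b51);  // true: a null filter accepts everything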

class ProfileTree;

class V8_EXPORT_PRIVATE ProfileNode {
 public:
  inline ProfileNode(ProfileTree* tree, CodeEntry* entry, ProfileNode* parent,
                     int line_number = 0);
  ~ProfileNode();
  ProfileNode(const ProfileNode&) = delete;
  ProfileNode& operator=(const ProfileNode&) = delete;

  ProfileNode* FindChild(
      CodeEntry* entry,
      int line_number = v8::CpuProfileNode::kNoLineNumberInfo);
  ProfileNode* FindOrAddChild(CodeEntry* entry, int line_number = 0);
  void IncrementSelfTicks() { ++self_ticks_; }
  void IncreaseSelfTicks(unsigned amount) { self_ticks_ += amount; }
  void IncrementLineTicks(int src_line);

  CodeEntry* entry() const { return entry_; }
  unsigned self_ticks() const { return self_ticks_; }
  const std::vector<ProfileNode*>* children() const { return &children_list_; }
  unsigned id() const { return id_; }
  ProfileNode* parent() const { return parent_; }
  int line_number() const {
    return line_number_ != 0 ? line_number_ : entry_->line_number();
  }
  CpuProfileNode::SourceType source_type() const;

  unsigned int GetHitLineCount() const {
    return static_cast<unsigned int>(line_ticks_.size());
  }
  bool GetLineTicks(v8::CpuProfileNode::LineTick* entries,
                    unsigned int length) const;
  void CollectDeoptInfo(CodeEntry* entry);
  const std::vector<CpuProfileDeoptInfo>& deopt_infos() const {
    return deopt_infos_;
  }
  Isolate* isolate() const;

  void Print(int indent) const;

 private:
  struct Equals {
    bool operator()(CodeEntryAndLineNumber lhs,
                    CodeEntryAndLineNumber rhs) const {
      return lhs.code_entry->IsSameFunctionAs(rhs.code_entry) &&
             lhs.line_number == rhs.line_number;
    }
  };
  struct Hasher {
    std::size_t operator()(CodeEntryAndLineNumber pair) const {
      return pair.code_entry->GetHash() ^ ComputeUnseededHash(pair.line_number);
    }
  };

  ProfileTree* tree_;
  CodeEntry* entry_;
  unsigned self_ticks_;
  std::unordered_map<CodeEntryAndLineNumber, ProfileNode*, Hasher, Equals>
      children_;
  int line_number_;
  std::vector<ProfileNode*> children_list_;
  ProfileNode* parent_;
  unsigned id_;
  // maps line number --> number of ticks
  std::unordered_map<int, int> line_ticks_;

  std::vector<CpuProfileDeoptInfo> deopt_infos_;
};
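
// Illustrative call pattern (not from this header) for reading per-line hit
// counts out of a ProfileNode: size the buffer via GetHitLineCount(), then
// let GetLineTicks() fill it; it reports failure if the buffer cannot hold
// the data.
//
//   std::vector<v8::CpuProfileNode::LineTick> ticks(node->GetHitLineCount());
//   if (node->GetLineTicks(ticks.data(),
//                          static_cast<unsigned int>(ticks.size()))) {
//     for (const auto& tick : ticks) {
//       // tick.line: 1-based source line; tick.hit_count: samples on it.
//     }
//   }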

class CodeEntryStorage;

class V8_EXPORT_PRIVATE ProfileTree {
 public:
  explicit ProfileTree(Isolate* isolate, CodeEntryStorage* storage = nullptr);
  ~ProfileTree();
  ProfileTree(const ProfileTree&) = delete;
  ProfileTree& operator=(const ProfileTree&) = delete;

  using ProfilingMode = v8::CpuProfilingMode;

  ProfileNode* AddPathFromEnd(
      const std::vector<CodeEntry*>& path,
      int src_line = v8::CpuProfileNode::kNoLineNumberInfo,
      bool update_stats = true);
  ProfileNode* AddPathFromEnd(
      const ProfileStackTrace& path,
      int src_line = v8::CpuProfileNode::kNoLineNumberInfo,
      bool update_stats = true,
      ProfilingMode mode = ProfilingMode::kLeafNodeLineNumbers);
  ProfileNode* root() const { return root_; }
  unsigned next_node_id() { return next_node_id_++; }

  void Print() const { root_->Print(0); }

  Isolate* isolate() const { return isolate_; }

  void EnqueueNode(const ProfileNode* node) { pending_nodes_.push_back(node); }
  size_t pending_nodes_count() const { return pending_nodes_.size(); }
  std::vector<const ProfileNode*> TakePendingNodes() {
    return std::move(pending_nodes_);
  }

  CodeEntryStorage* code_entries() { return code_entries_; }

 private:
  template <typename Callback>
  void TraverseDepthFirst(Callback* callback);

  std::vector<const ProfileNode*> pending_nodes_;

  unsigned next_node_id_;
  Isolate* isolate_;
  CodeEntryStorage* const code_entries_;
  ProfileNode* root_;
};

class CpuProfiler;

class CpuProfile {
 public:
  struct SampleInfo {
    ProfileNode* node;
    base::TimeTicks timestamp;
    int line;
    StateTag state_tag;
    EmbedderStateTag embedder_state_tag;
  };

  V8_EXPORT_PRIVATE CpuProfile(
      CpuProfiler* profiler, ProfilerId id, const char* title,
      CpuProfilingOptions options,
      std::unique_ptr<DiscardedSamplesDelegate> delegate = nullptr);
  CpuProfile(const CpuProfile&) = delete;
  CpuProfile& operator=(const CpuProfile&) = delete;

  // Checks whether or not the given TickSample should be (sub)sampled, given
  // the sampling interval of the profiler that recorded it (in microseconds).
  V8_EXPORT_PRIVATE bool CheckSubsample(base::TimeDelta sampling_interval);
  // Add pc -> ... -> main() call path to the profile.
  void AddPath(base::TimeTicks timestamp, const ProfileStackTrace& path,
               int src_line, bool update_stats,
               base::TimeDelta sampling_interval, StateTag state,
               EmbedderStateTag embedder_state);
  void FinishProfile();

  const char* title() const { return title_; }
  const ProfileTree* top_down() const { return &top_down_; }

  int samples_count() const { return static_cast<int>(samples_.size()); }
  const SampleInfo& sample(int index) const { return samples_[index]; }

  int64_t sampling_interval_us() const {
    return options_.sampling_interval_us();
  }

  base::TimeTicks start_time() const { return start_time_; }
  base::TimeTicks end_time() const { return end_time_; }
  CpuProfiler* cpu_profiler() const { return profiler_; }
  ContextFilter& context_filter() { return context_filter_; }
  ProfilerId id() const { return id_; }

  void UpdateTicksScale();

  V8_EXPORT_PRIVATE void Print() const;

 private:
  void StreamPendingTraceEvents();

  const char* title_;
  const CpuProfilingOptions options_;
  std::unique_ptr<DiscardedSamplesDelegate> delegate_;
  ContextFilter context_filter_;
  base::TimeTicks start_time_;
  base::TimeTicks end_time_;
  std::deque<SampleInfo> samples_;
  ProfileTree top_down_;
  CpuProfiler* const profiler_;
  size_t streaming_next_sample_;
  const ProfilerId id_;
  // Number of microseconds worth of profiler ticks that should elapse before
  // the next sample is recorded.
  base::TimeDelta next_sample_delta_;
};

class CpuProfileMaxSamplesCallbackTask : public v8::Task {
 public:
  explicit CpuProfileMaxSamplesCallbackTask(
      std::unique_ptr<DiscardedSamplesDelegate> delegate)
      : delegate_(std::move(delegate)) {}

  void Run() override { delegate_->Notify(); }

 private:
  std::unique_ptr<DiscardedSamplesDelegate> delegate_;
};
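
// Sketch of a plausible hand-off (hypothetical call site; not from this
// header): once a profile hits its sample limit, the task can be posted to
// the embedder's foreground task runner so Notify() runs on the isolate's
// thread. GetForegroundTaskRunner() and PostTask() are public v8::Platform /
// v8::TaskRunner APIs.
//
//   std::shared_ptr<v8::TaskRunner> runner =
//       platform->GetForegroundTaskRunner(isolate);
//   runner->PostTask(std::make_unique<CpuProfileMaxSamplesCallbackTask>(
//       std::move(delegate)));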

class V8_EXPORT_PRIVATE CodeMap {
 public:
  explicit CodeMap(CodeEntryStorage& storage);
  ~CodeMap();
  CodeMap(const CodeMap&) = delete;
  CodeMap& operator=(const CodeMap&) = delete;

  // Adds the given CodeEntry to the CodeMap. The CodeMap takes ownership of
  // the CodeEntry.
  void AddCode(Address addr, CodeEntry* entry, unsigned size);
  void MoveCode(Address from, Address to);
  // Attempts to remove the given CodeEntry from the CodeMap.
  // Returns true iff the entry was found and removed.
  bool RemoveCode(CodeEntry*);
  void ClearCodesInRange(Address start, Address end);
  CodeEntry* FindEntry(Address addr, Address* out_instruction_start = nullptr);
  void Print();
  size_t size() const { return code_map_.size(); }

  size_t GetEstimatedMemoryUsage() const;

  CodeEntryStorage& code_entries() { return code_entries_; }

  void Clear();

 private:
  struct CodeEntryMapInfo {
    CodeEntry* entry;
    unsigned size;
  };

  std::multimap<Address, CodeEntryMapInfo> code_map_;
  CodeEntryStorage& code_entries_;
};
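
// Illustrative usage (hypothetical addresses; not from this header): register
// a code object at its start address, then resolve a pc inside its range.
//
//   CodeEntryStorage storage;
//   CodeMap code_map(storage);
//   code_map.AddCode(0x1000, entry, 0x80);  // entry covers [0x1000, 0x1080)
//   Address start;
//   CodeEntry* hit = code_map.FindEntry(0x1040, &start);
//   // hit == entry, start == 0x1000 when the pc falls inside the range.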

// Manages the lifetime of CodeEntry objects, and stores shared resources
// between them.
class V8_EXPORT_PRIVATE CodeEntryStorage {
 public:
  template <typename... Args>
  static CodeEntry* Create(Args&&... args) {
    CodeEntry* const entry = new CodeEntry(std::forward<Args>(args)...);
    entry->mark_ref_counted();
    return entry;
  }

  void AddRef(CodeEntry*);
  void DecRef(CodeEntry*);

  StringsStorage& strings() { return function_and_resource_names_; }

 private:
  StringsStorage function_and_resource_names_;
};
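
// Sketch of the ref-counting lifecycle implied by Create()/AddRef()/DecRef()
// (FUNCTION_TAG and "foo" are illustrative arguments): an entry is born with
// a count of one, and is destroyed, with its strings released, once the count
// returns to zero.
//
//   CodeEntry* entry = CodeEntryStorage::Create(
//       CodeEventListener::FUNCTION_TAG, "foo");  // ref count: 1
//   storage.AddRef(entry);                        // ref count: 2
//   storage.DecRef(entry);                        // ref count: 1
//   storage.DecRef(entry);                        // ref count: 0 -> freed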

class V8_EXPORT_PRIVATE CpuProfilesCollection {
 public:
  explicit CpuProfilesCollection(Isolate* isolate);
  CpuProfilesCollection(const CpuProfilesCollection&) = delete;
  CpuProfilesCollection& operator=(const CpuProfilesCollection&) = delete;

  void set_cpu_profiler(CpuProfiler* profiler) { profiler_ = profiler; }
  CpuProfilingResult StartProfiling(
      const char* title = nullptr, CpuProfilingOptions options = {},
      std::unique_ptr<DiscardedSamplesDelegate> delegate = nullptr);

  // This method is only visible for testing.
  CpuProfilingResult StartProfilingForTesting(ProfilerId id);
  CpuProfile* StopProfiling(ProfilerId id);
  bool IsLastProfileLeft(ProfilerId id);
  CpuProfile* Lookup(const char* title);

  std::vector<std::unique_ptr<CpuProfile>>* profiles() {
    return &finished_profiles_;
  }
  const char* GetName(Name name) { return resource_names_.GetName(name); }
  void RemoveProfile(CpuProfile* profile);

  // Finds a common sampling interval dividing each CpuProfile's interval,
  // rounded up to the nearest multiple of the CpuProfiler's sampling interval.
  // Returns 0 if no profiles are attached.
  base::TimeDelta GetCommonSamplingInterval() const;
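
  // Worked example for GetCommonSamplingInterval() (hypothetical numbers):
  // with a 500us profiler base interval and profiles requesting 1000us and
  // 1500us, 500us divides both requested intervals and is itself a multiple
  // of the base, so a 500us common interval satisfies the contract above;
  // each profile then subsamples the shared tick stream (see
  // CpuProfile::CheckSubsample).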

  // Called from profile generator thread.
  void AddPathToCurrentProfiles(
      base::TimeTicks timestamp, const ProfileStackTrace& path, int src_line,
      bool update_stats, base::TimeDelta sampling_interval, StateTag state,
      EmbedderStateTag embedder_state_tag,
      Address native_context_address = kNullAddress,
      Address native_embedder_context_address = kNullAddress);

  // Called from profile generator thread.
  void UpdateNativeContextAddressForCurrentProfiles(Address from, Address to);

  // Limits the number of profiles that can be simultaneously collected.
  static const int kMaxSimultaneousProfiles = 100;

 private:
  CpuProfilingResult StartProfiling(
      ProfilerId id, const char* title = nullptr,
      CpuProfilingOptions options = {},
      std::unique_ptr<DiscardedSamplesDelegate> delegate = nullptr);
  StringsStorage resource_names_;
  std::vector<std::unique_ptr<CpuProfile>> finished_profiles_;
  CpuProfiler* profiler_;

  // Accessed by VM thread and profile generator thread.
  std::vector<std::unique_ptr<CpuProfile>> current_profiles_;
  base::Semaphore current_profiles_semaphore_;
  static std::atomic<ProfilerId> last_id_;
  Isolate* isolate_;
};

}  // namespace internal
}  // namespace v8

#endif  // V8_PROFILER_PROFILE_GENERATOR_H_