• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright 2017 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "components/metrics/persistent_system_profile.h"
6 
7 #include <set>
8 
9 #include "base/atomicops.h"
10 #include "base/bits.h"
11 #include "base/containers/contains.h"
12 #include "base/containers/cxx20_erase.h"
13 #include "base/debug/crash_logging.h"
14 #include "base/memory/singleton.h"
15 #include "base/metrics/persistent_memory_allocator.h"
16 #include "base/notreached.h"
17 #include "base/pickle.h"
18 #include "components/variations/active_field_trials.h"
19 
20 namespace metrics {
21 
namespace {

// To provide atomic addition of records so that there is no confusion between
// writers and readers, all of the metadata about a record is contained in a
// structure that can be stored as a single atomic 32-bit word.
union RecordHeader {
  struct {
    unsigned continued : 1;  // Flag indicating if there is more after this.
    unsigned type : 7;       // The type of this record.
    unsigned amount : 24;    // The amount of data to follow.
  } as_parts;
  base::subtle::Atomic32 as_atomic;
};

// Type-ID marking memory segments owned by this class inside the persistent
// allocator (per the trailing comment, derived from SHA1 of "SystemProfile").
constexpr uint32_t kTypeIdSystemProfile = 0x330A7150;  // SHA1(SystemProfile)
// Default size of each allocated segment; records can span segments.
constexpr size_t kSystemProfileAllocSize = 4 << 10;    // 4 KiB
// The |amount| bit-field is 24 bits wide, so a record's payload plus its
// header can never exceed 2^24 bytes.
constexpr size_t kMaxRecordSize = (1 << 24) - sizeof(RecordHeader);
// An empty group name in a field-trial record marks the trial as deleted.
constexpr char kFieldTrialDeletionSentinel[] = "";

// The header must fit exactly in one atomic word for lock-free publication.
static_assert(sizeof(RecordHeader) == sizeof(base::subtle::Atomic32),
              "bad RecordHeader size");

// Calculate the size of a record based on the amount of data. This adds room
// for the record header and rounds up to the next multiple of the record-header
// size.
size_t CalculateRecordSize(size_t data_amount) {
  return base::bits::AlignUp(data_amount + sizeof(RecordHeader),
                             sizeof(RecordHeader));
}

}  // namespace
53 
// Constructs a writable record allocator over |memory_allocator|, immediately
// reserving an initial segment sized for at least |min_size| bytes of record
// data (see AddSegment, which rounds up to the default segment size).
PersistentSystemProfile::RecordAllocator::RecordAllocator(
    base::PersistentMemoryAllocator* memory_allocator,
    size_t min_size)
    : allocator_(memory_allocator),
      has_complete_profile_(false),
      alloc_reference_(0),
      alloc_size_(0),
      end_offset_(0) {
  AddSegment(min_size);
}
64 
// Constructs a read-only record allocator for iterating existing records.
// NOTE(review): the const_cast lets both modes share one |allocator_| field;
// presumably only const operations are performed through instances built this
// way — confirm against the header's usage contract.
PersistentSystemProfile::RecordAllocator::RecordAllocator(
    const base::PersistentMemoryAllocator* memory_allocator)
    : allocator_(
          const_cast<base::PersistentMemoryAllocator*>(memory_allocator)),
      alloc_reference_(0),
      alloc_size_(0),
      end_offset_(0) {}
72 
// Returns this allocator to its initial state: the first header of every
// owned segment is zeroed so previously written records become invisible to
// readers, then the iteration/write state is rewound. The segments themselves
// stay allocated and are reused by subsequent writes.
void PersistentSystemProfile::RecordAllocator::Reset() {
  // Clear the first word of all blocks so they're known to be "empty".
  alloc_reference_ = 0;
  while (NextSegment()) {
    // Get the block as a char* and cast it. It can't be fetched directly as
    // an array of RecordHeader because that's not a fundamental type and only
    // arrays of fundamental types are allowed.
    RecordHeader* header =
        reinterpret_cast<RecordHeader*>(allocator_->GetAsArray<char>(
            alloc_reference_, kTypeIdSystemProfile, sizeof(RecordHeader)));
    DCHECK(header);
    // A zeroed first header makes the whole segment read as "unused".
    base::subtle::NoBarrier_Store(&header->as_atomic, 0);
  }

  // Reset member variables.
  has_complete_profile_ = false;
  alloc_reference_ = 0;
  alloc_size_ = 0;
  end_offset_ = 0;
}
93 
Write(RecordType type,base::StringPiece record)94 bool PersistentSystemProfile::RecordAllocator::Write(RecordType type,
95                                                      base::StringPiece record) {
96   const char* data = record.data();
97   size_t remaining_size = record.size();
98 
99   // Allocate space and write records until everything has been stored.
100   do {
101     if (end_offset_ == alloc_size_) {
102       if (!AddSegment(remaining_size))
103         return false;
104     }
105     // Write out as much of the data as possible. |data| and |remaining_size|
106     // are updated in place.
107     if (!WriteData(type, &data, &remaining_size))
108       return false;
109   } while (remaining_size > 0);
110 
111   return true;
112 }
113 
// Returns true if a record exists at the current iteration position. Note
// that NextSegment() updates iteration state even though this method is const
// — the iteration members are evidently mutable (declared in the header).
bool PersistentSystemProfile::RecordAllocator::HasMoreData() const {
  // Lazily attach to the first segment if iteration hasn't started yet.
  if (alloc_reference_ == 0 && !NextSegment())
    return false;

  char* block =
      allocator_->GetAsArray<char>(alloc_reference_, kTypeIdSystemProfile,
                                   base::PersistentMemoryAllocator::kSizeAny);
  if (!block)
    return false;

  // Acquire-load the header at the current offset; any type other than
  // kUnusedSpace means a writer has published a record there.
  RecordHeader header;
  header.as_atomic = base::subtle::Acquire_Load(
      reinterpret_cast<base::subtle::Atomic32*>(block + end_offset_));
  return header.as_parts.type != kUnusedSpace;
}
129 
// Reads the next logical record, concatenating continuation pieces (which may
// span multiple segments) into |record|. Returns true with |*type| set on
// success; false when no further complete record is available.
bool PersistentSystemProfile::RecordAllocator::Read(RecordType* type,
                                                    std::string* record) const {
  *type = kUnusedSpace;
  record->clear();

  // Access data and read records until everything has been loaded.
  while (true) {
    if (end_offset_ == alloc_size_) {
      // Current segment is exhausted; advance to the next one, if any.
      if (!NextSegment())
        return false;
    }
    // ReadData() returns true once the record is complete (or iteration must
    // stop); a |*type| of kUnusedSpace at that point means "no record".
    if (ReadData(type, record))
      return *type != kUnusedSpace;
  }
}
145 
// Advances iteration state to the next memory segment of our type, resetting
// the intra-segment offset. Returns false when there are no more segments.
// Const but state-mutating: the iteration members must be mutable.
bool PersistentSystemProfile::RecordAllocator::NextSegment() const {
  base::PersistentMemoryAllocator::Iterator iter(allocator_, alloc_reference_);
  alloc_reference_ = iter.GetNextOfType(kTypeIdSystemProfile);
  alloc_size_ = allocator_->GetAllocSize(alloc_reference_);
  end_offset_ = 0;
  return alloc_reference_ != 0;
}
153 
// Makes another segment available for writing: either advances to an existing
// (already-zeroed) one, or allocates a fresh one large enough for at least
// |min_size| bytes of record data. Returns false only when the underlying
// persistent allocator has no room left.
bool PersistentSystemProfile::RecordAllocator::AddSegment(size_t min_size) {
  if (NextSegment()) {
    // The first record-header should have been zeroed as part of the allocation
    // or by the "reset" procedure.
    DCHECK_EQ(0, base::subtle::NoBarrier_Load(
                     allocator_->GetAsArray<base::subtle::Atomic32>(
                         alloc_reference_, kTypeIdSystemProfile, 1)));
    return true;
  }

  DCHECK_EQ(0U, alloc_reference_);
  DCHECK_EQ(0U, end_offset_);

  // Allocate at least the default segment size; more if a single record
  // (including its header overhead) demands it.
  size_t size =
      std::max(CalculateRecordSize(min_size), kSystemProfileAllocSize);

  uint32_t ref = allocator_->Allocate(size, kTypeIdSystemProfile);
  if (!ref)
    return false;  // Allocator must be full.
  // Make the new segment discoverable by iterators (and thus by readers).
  allocator_->MakeIterable(ref);

  alloc_reference_ = ref;
  alloc_size_ = allocator_->GetAllocSize(ref);
  return true;
}
179 
// Writes as much of |*data| as fits in the current segment as one record,
// advancing |*data| / |*data_size| past what was stored. The header is
// release-stored *after* the payload so that a concurrent reader doing an
// acquire-load of the header never observes a partially written payload.
bool PersistentSystemProfile::RecordAllocator::WriteData(RecordType type,
                                                         const char** data,
                                                         size_t* data_size) {
  char* block =
      allocator_->GetAsArray<char>(alloc_reference_, kTypeIdSystemProfile,
                                   base::PersistentMemoryAllocator::kSizeAny);
  if (!block)
    return false;  // It's bad if there is no accessible block.

  // Bound the write by both the 24-bit |amount| field and the room left in
  // this segment, always reserving one header's worth of trailing space.
  const size_t max_write_size = std::min(
      kMaxRecordSize, alloc_size_ - end_offset_ - sizeof(RecordHeader));
  const size_t write_size = std::min(*data_size, max_write_size);
  const size_t record_size = CalculateRecordSize(write_size);
  DCHECK_LT(write_size, record_size);

  // Write the data and the record header.
  RecordHeader header;
  header.as_atomic = 0;
  header.as_parts.type = type;
  header.as_parts.amount = write_size;
  // A partial write is flagged so the remainder continues in the next record.
  header.as_parts.continued = (write_size < *data_size);
  size_t offset = end_offset_;
  end_offset_ += record_size;
  DCHECK_GE(alloc_size_, end_offset_);
  if (end_offset_ < alloc_size_) {
    // An empty record header has to be next before this one gets written.
    base::subtle::NoBarrier_Store(
        reinterpret_cast<base::subtle::Atomic32*>(block + end_offset_), 0);
  }
  memcpy(block + offset + sizeof(header), *data, write_size);
  // Publish the record: release-storing the header makes the payload (and the
  // zeroed follow-on header) visible to acquire-loading readers.
  base::subtle::Release_Store(
      reinterpret_cast<base::subtle::Atomic32*>(block + offset),
      header.as_atomic);

  // Account for what was stored and prepare for follow-on records with any
  // remaining data.
  *data += write_size;
  *data_size -= write_size;

  return true;
}
221 
// Reads one record piece at the current offset and appends its payload to
// |record|. Returns true when the logical record is complete or iteration
// should stop (|*type| == kUnusedSpace then means "no record"); returns false
// when a continuation piece was consumed and more pieces follow, or when a
// continuation's type does not match the record being assembled.
bool PersistentSystemProfile::RecordAllocator::ReadData(
    RecordType* type,
    std::string* record) const {
  DCHECK_GT(alloc_size_, end_offset_);

  char* block =
      allocator_->GetAsArray<char>(alloc_reference_, kTypeIdSystemProfile,
                                   base::PersistentMemoryAllocator::kSizeAny);
  if (!block) {
    *type = kUnusedSpace;
    return true;  // No more data.
  }

  // Get and validate the record header.
  RecordHeader header;
  header.as_atomic = base::subtle::Acquire_Load(
      reinterpret_cast<base::subtle::Atomic32*>(block + end_offset_));
  bool continued = !!header.as_parts.continued;
  if (header.as_parts.type == kUnusedSpace) {
    *type = kUnusedSpace;
    return true;  // End of all records.
  } else if (*type == kUnusedSpace) {
    // First piece of a record: adopt its type for subsequent pieces.
    *type = static_cast<RecordType>(header.as_parts.type);
  } else if (*type != header.as_parts.type) {
    DUMP_WILL_BE_NOTREACHED_NORETURN();  // Continuation didn't match start of
                                         // record.
    *type = kUnusedSpace;
    record->clear();
    return false;
  }
  size_t read_size = header.as_parts.amount;
  // Reject a header whose claimed payload extends past the segment.
  if (end_offset_ + sizeof(header) + read_size > alloc_size_) {
#if !BUILDFLAG(IS_NACL)
    // TODO(crbug/1432981): Remove these. They are used to investigate
    // unexpected failures.
    SCOPED_CRASH_KEY_NUMBER("PersistentSystemProfile", "end_offset_",
                            end_offset_);
    SCOPED_CRASH_KEY_NUMBER("PersistentSystemProfile", "read_size", read_size);
    SCOPED_CRASH_KEY_NUMBER("PersistentSystemProfile", "alloc_size_",
                            alloc_size_);
#endif  // !BUILDFLAG(IS_NACL)

    DUMP_WILL_BE_NOTREACHED_NORETURN();  // Invalid header amount.
    *type = kUnusedSpace;
    return true;  // Don't try again.
  }

  // Append the record data to the output string.
  record->append(block + end_offset_ + sizeof(header), read_size);
  end_offset_ += CalculateRecordSize(read_size);
  DCHECK_GE(alloc_size_, end_offset_);

  return !continued;
}
276 
PersistentSystemProfile()277 PersistentSystemProfile::PersistentSystemProfile() {}
278 
~PersistentSystemProfile()279 PersistentSystemProfile::~PersistentSystemProfile() {}
280 
RegisterPersistentAllocator(base::PersistentMemoryAllocator * memory_allocator)281 void PersistentSystemProfile::RegisterPersistentAllocator(
282     base::PersistentMemoryAllocator* memory_allocator) {
283   DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
284 
285   // Create and store the allocator. A |min_size| of "1" ensures that a memory
286   // block is reserved now.
287   RecordAllocator allocator(memory_allocator, 1);
288   allocators_.push_back(std::move(allocator));
289   all_have_complete_profile_ = false;
290 }
291 
DeregisterPersistentAllocator(base::PersistentMemoryAllocator * memory_allocator)292 void PersistentSystemProfile::DeregisterPersistentAllocator(
293     base::PersistentMemoryAllocator* memory_allocator) {
294   DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
295 
296   // This would be more efficient with a std::map but it's not expected that
297   // allocators will get deregistered with any frequency, if at all.
298   base::EraseIf(allocators_, [=](RecordAllocator& records) {
299     return records.allocator() == memory_allocator;
300   });
301 }
302 
SetSystemProfile(const std::string & serialized_profile,bool complete)303 void PersistentSystemProfile::SetSystemProfile(
304     const std::string& serialized_profile,
305     bool complete) {
306   DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
307 
308   if (allocators_.empty() || serialized_profile.empty())
309     return;
310 
311   for (auto& allocator : allocators_) {
312     // Don't overwrite a complete profile with an incomplete one.
313     if (!complete && allocator.has_complete_profile())
314       continue;
315     // System profile always starts fresh.
316     allocator.Reset();
317     // Write out the serialized profile.
318     allocator.Write(kSystemProfileProto, serialized_profile);
319     // Indicate if this is a complete profile.
320     if (complete)
321       allocator.set_complete_profile();
322   }
323 
324   if (complete)
325     all_have_complete_profile_ = true;
326 }
327 
SetSystemProfile(const SystemProfileProto & profile,bool complete)328 void PersistentSystemProfile::SetSystemProfile(
329     const SystemProfileProto& profile,
330     bool complete) {
331   // Avoid serialization if passed profile is not complete and all allocators
332   // already have complete ones.
333   if (!complete && all_have_complete_profile_)
334     return;
335 
336   std::string serialized_profile;
337   if (!profile.SerializeToString(&serialized_profile))
338     return;
339   SetSystemProfile(serialized_profile, complete);
340 }
341 
AddFieldTrial(base::StringPiece trial,base::StringPiece group)342 void PersistentSystemProfile::AddFieldTrial(base::StringPiece trial,
343                                             base::StringPiece group) {
344   DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
345   DCHECK(!trial.empty());
346 
347   base::Pickle pickler;
348   pickler.WriteString(trial);
349   pickler.WriteString(group);
350 
351   WriteToAll(kFieldTrialInfo,
352              base::StringPiece(pickler.data_as_char(), pickler.size()));
353 }
354 
RemoveFieldTrial(base::StringPiece trial)355 void PersistentSystemProfile::RemoveFieldTrial(base::StringPiece trial) {
356   DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
357   DCHECK(!trial.empty());
358 
359   base::Pickle pickler;
360   pickler.WriteString(trial);
361   pickler.WriteString(kFieldTrialDeletionSentinel);
362 
363   WriteToAll(kFieldTrialInfo,
364              base::StringPiece(pickler.data_as_char(), pickler.size()));
365 }
366 // static
HasSystemProfile(const base::PersistentMemoryAllocator & memory_allocator)367 bool PersistentSystemProfile::HasSystemProfile(
368     const base::PersistentMemoryAllocator& memory_allocator) {
369   const RecordAllocator records(&memory_allocator);
370   return records.HasMoreData();
371 }
372 
373 // static
GetSystemProfile(const base::PersistentMemoryAllocator & memory_allocator,SystemProfileProto * system_profile)374 bool PersistentSystemProfile::GetSystemProfile(
375     const base::PersistentMemoryAllocator& memory_allocator,
376     SystemProfileProto* system_profile) {
377   const RecordAllocator records(&memory_allocator);
378 
379   RecordType type;
380   std::string record;
381   do {
382     if (!records.Read(&type, &record))
383       return false;
384   } while (type != kSystemProfileProto);
385 
386   if (!system_profile)
387     return true;
388 
389   if (!system_profile->ParseFromString(record))
390     return false;
391 
392   MergeUpdateRecords(memory_allocator, system_profile);
393   return true;
394 }
395 
// static
// Replays the "update" records stored in |memory_allocator| on top of
// |system_profile|: field-trial additions overwrite matching entries,
// sentinel (empty-group) records delete them, and the profile's field-trial
// list is rewritten from the merged map so no duplicates remain.
void PersistentSystemProfile::MergeUpdateRecords(
    const base::PersistentMemoryAllocator& memory_allocator,
    SystemProfileProto* system_profile) {
  const RecordAllocator records(&memory_allocator);

  RecordType type;
  std::string record;
  // Accumulated trial-name-id -> group-id pairs; the final merged state.
  std::map<uint32_t, uint32_t> field_trials;
  bool updated = false;

  // This is done separate from the code that gets the profile because it
  // compartmentalizes the code and makes it possible to reuse this section
  // should it be needed to merge "update" records into a new "complete"
  // system profile that somehow didn't get all the updates.
  while (records.Read(&type, &record)) {
    switch (type) {
      case kUnusedSpace:
        // These should never be returned.
        NOTREACHED();
        break;

      case kSystemProfileProto:
        // Profile was passed in; ignore this one.
        break;

      case kFieldTrialInfo: {
        // Get the set of known trial IDs so duplicates don't get added.
        // (Seeded lazily from the profile on the first update record.)
        if (field_trials.empty()) {
          for (int i = 0; i < system_profile->field_trial_size(); ++i) {
            field_trials[system_profile->field_trial(i).name_id()] =
                system_profile->field_trial(i).group_id();
          }
        }

        // Decode the trial/group pair written by Add/RemoveFieldTrial().
        base::Pickle pickler(record.data(), record.size());
        base::PickleIterator iter(pickler);
        base::StringPiece trial;
        base::StringPiece group;
        if (iter.ReadStringPiece(&trial) && iter.ReadStringPiece(&group)) {
          variations::ActiveGroupId field_ids =
              variations::MakeActiveGroupId(trial, group);
          if (group == kFieldTrialDeletionSentinel) {
            // The sentinel (empty) group marks the trial as removed.
            field_trials.erase(field_ids.name);
          } else {
            field_trials[field_ids.name] = field_ids.group;
          }
        }
        // Even a malformed pickle counts as an update (matches the original
        // behavior: |updated| is set outside the decode check).
        updated = true;
      } break;
    }
  }

  // Skip rewriting the field trials if there was no update.
  if (!updated) {
    return;
  }

  // Rewrite the full list of field trials to avoid duplicates.
  system_profile->clear_field_trial();

  for (const auto& trial : field_trials) {
    SystemProfileProto::FieldTrial* field_trial =
        system_profile->add_field_trial();
    field_trial->set_name_id(trial.first);
    field_trial->set_group_id(trial.second);
  }
}
464 
WriteToAll(RecordType type,base::StringPiece record)465 void PersistentSystemProfile::WriteToAll(RecordType type,
466                                          base::StringPiece record) {
467   for (auto& allocator : allocators_)
468     allocator.Write(type, record);
469 }
470 
// Returns the process-wide singleton. Leaky traits mean the instance is never
// destroyed, so the profile data it manages stays valid through shutdown.
GlobalPersistentSystemProfile* GlobalPersistentSystemProfile::GetInstance() {
  return base::Singleton<
      GlobalPersistentSystemProfile,
      base::LeakySingletonTraits<GlobalPersistentSystemProfile>>::get();
}
476 
477 }  // namespace metrics
478