• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright 2017 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifdef UNSAFE_BUFFERS_BUILD
6 // TODO(crbug.com/40285824): Remove this and convert code to safer constructs.
7 #pragma allow_unsafe_buffers
8 #endif
9 
10 #include "components/metrics/persistent_system_profile.h"
11 
12 #include <set>
13 #include <string_view>
14 #include <vector>
15 
16 #include "base/atomicops.h"
17 #include "base/bits.h"
18 #include "base/containers/contains.h"
19 #include "base/containers/span.h"
20 #include "base/debug/crash_logging.h"
21 #include "base/memory/singleton.h"
22 #include "base/metrics/persistent_memory_allocator.h"
23 #include "base/notreached.h"
24 #include "base/pickle.h"
25 #include "components/variations/active_field_trials.h"
26 
27 namespace metrics {
28 
29 namespace {
30 
// To provide atomic addition of records so that there is no confusion between
// writers and readers, all of the metadata about a record is contained in a
// structure that can be stored as a single atomic 32-bit word.
union RecordHeader {
  struct {
    unsigned continued : 1;  // Flag indicating if there is more after this.
    unsigned type : 7;       // The type of this record.
    unsigned amount : 24;    // The amount of data to follow.
  } as_parts;
  base::subtle::Atomic32 as_atomic;
};

// Type identifier used for system-profile allocations within the persistent
// memory segment (derived from SHA1 of "SystemProfile", per the comment).
constexpr uint32_t kTypeIdSystemProfile = 0x330A7150;  // SHA1(SystemProfile)
// Default size of each allocated segment; larger records force bigger ones.
constexpr size_t kSystemProfileAllocSize = 4 << 10;    // 4 KiB
// The header's |amount| field is 24 bits wide, so a single record can hold at
// most (2^24 - header) bytes; anything larger is split into "continued"
// records.
constexpr size_t kMaxRecordSize = (1 << 24) - sizeof(RecordHeader);
// An empty group name written with a trial marks that trial as deleted when
// update records are merged (see MergeUpdateRecords).
constexpr char kFieldTrialDeletionSentinel[] = "";

static_assert(sizeof(RecordHeader) == sizeof(base::subtle::Atomic32),
              "bad RecordHeader size");
50 
51 // Calculate the size of a record based on the amount of data. This adds room
52 // for the record header and rounds up to the next multiple of the record-header
53 // size.
CalculateRecordSize(size_t data_amount)54 size_t CalculateRecordSize(size_t data_amount) {
55   return base::bits::AlignUp(data_amount + sizeof(RecordHeader),
56                              sizeof(RecordHeader));
57 }
58 
59 }  // namespace
60 
// Constructs a writable record allocator on top of |memory_allocator| and
// immediately reserves a first segment large enough for |min_size| bytes.
PersistentSystemProfile::RecordAllocator::RecordAllocator(
    base::PersistentMemoryAllocator* memory_allocator,
    size_t min_size)
    : allocator_(memory_allocator),
      has_complete_profile_(false),
      alloc_reference_(0),
      alloc_size_(0),
      end_offset_(0) {
  AddSegment(min_size);
}
71 
// Constructs a read-only record allocator. The const_cast is required because
// |allocator_| is shared with the writable path; this instance only performs
// read operations on it.
PersistentSystemProfile::RecordAllocator::RecordAllocator(
    const base::PersistentMemoryAllocator* memory_allocator)
    : allocator_(
          const_cast<base::PersistentMemoryAllocator*>(memory_allocator)),
      alloc_reference_(0),
      alloc_size_(0),
      end_offset_(0) {}
79 
Reset()80 void PersistentSystemProfile::RecordAllocator::Reset() {
81   // Clear the first word of all blocks so they're known to be "empty".
82   alloc_reference_ = 0;
83   while (NextSegment()) {
84     // Get the block as a char* and cast it. It can't be fetched directly as
85     // an array of RecordHeader because that's not a fundamental type and only
86     // arrays of fundamental types are allowed.
87     RecordHeader* header =
88         reinterpret_cast<RecordHeader*>(allocator_->GetAsArray<char>(
89             alloc_reference_, kTypeIdSystemProfile, sizeof(RecordHeader)));
90     DCHECK(header);
91     base::subtle::NoBarrier_Store(&header->as_atomic, 0);
92   }
93 
94   // Reset member variables.
95   has_complete_profile_ = false;
96   alloc_reference_ = 0;
97   alloc_size_ = 0;
98   end_offset_ = 0;
99 }
100 
Write(RecordType type,std::string_view record)101 bool PersistentSystemProfile::RecordAllocator::Write(RecordType type,
102                                                      std::string_view record) {
103   const char* data = record.data();
104   size_t remaining_size = record.size();
105 
106   // Allocate space and write records until everything has been stored.
107   do {
108     if (end_offset_ == alloc_size_) {
109       if (!AddSegment(remaining_size))
110         return false;
111     }
112     // Write out as much of the data as possible. |data| and |remaining_size|
113     // are updated in place.
114     if (!WriteData(type, &data, &remaining_size))
115       return false;
116   } while (remaining_size > 0);
117 
118   return true;
119 }
120 
HasMoreData() const121 bool PersistentSystemProfile::RecordAllocator::HasMoreData() const {
122   if (alloc_reference_ == 0 && !NextSegment())
123     return false;
124 
125   char* block =
126       allocator_->GetAsArray<char>(alloc_reference_, kTypeIdSystemProfile,
127                                    base::PersistentMemoryAllocator::kSizeAny);
128   if (!block)
129     return false;
130 
131   RecordHeader header;
132   header.as_atomic = base::subtle::Acquire_Load(
133       reinterpret_cast<base::subtle::Atomic32*>(block + end_offset_));
134   return header.as_parts.type != kUnusedSpace;
135 }
136 
Read(RecordType * type,std::string * record) const137 bool PersistentSystemProfile::RecordAllocator::Read(RecordType* type,
138                                                     std::string* record) const {
139   *type = kUnusedSpace;
140   record->clear();
141 
142   // Access data and read records until everything has been loaded.
143   while (true) {
144     if (end_offset_ == alloc_size_) {
145       if (!NextSegment())
146         return false;
147     }
148     if (ReadData(type, record))
149       return *type != kUnusedSpace;
150   }
151 }
152 
NextSegment() const153 bool PersistentSystemProfile::RecordAllocator::NextSegment() const {
154   base::PersistentMemoryAllocator::Iterator iter(allocator_, alloc_reference_);
155   alloc_reference_ = iter.GetNextOfType(kTypeIdSystemProfile);
156   alloc_size_ = allocator_->GetAllocSize(alloc_reference_);
157   end_offset_ = 0;
158   return alloc_reference_ != 0;
159 }
160 
AddSegment(size_t min_size)161 bool PersistentSystemProfile::RecordAllocator::AddSegment(size_t min_size) {
162   if (NextSegment()) {
163     // The first record-header should have been zeroed as part of the allocation
164     // or by the "reset" procedure.
165     DCHECK_EQ(0, base::subtle::NoBarrier_Load(
166                      allocator_->GetAsArray<base::subtle::Atomic32>(
167                          alloc_reference_, kTypeIdSystemProfile, 1)));
168     return true;
169   }
170 
171   DCHECK_EQ(0U, alloc_reference_);
172   DCHECK_EQ(0U, end_offset_);
173 
174   size_t size =
175       std::max(CalculateRecordSize(min_size), kSystemProfileAllocSize);
176 
177   uint32_t ref = allocator_->Allocate(size, kTypeIdSystemProfile);
178   if (!ref)
179     return false;  // Allocator must be full.
180   allocator_->MakeIterable(ref);
181 
182   alloc_reference_ = ref;
183   alloc_size_ = allocator_->GetAllocSize(ref);
184   return true;
185 }
186 
// Writes one record's worth of |*data| into the current segment, advancing
// |*data| and |*data_size| past what was stored. May store only a prefix of
// the data (marking the record "continued") when the segment lacks space.
// Returns false only if the segment's memory block cannot be accessed.
bool PersistentSystemProfile::RecordAllocator::WriteData(RecordType type,
                                                         const char** data,
                                                         size_t* data_size) {
  char* block =
      allocator_->GetAsArray<char>(alloc_reference_, kTypeIdSystemProfile,
                                   base::PersistentMemoryAllocator::kSizeAny);
  if (!block)
    return false;  // It's bad if there is no accessible block.

  // Cap the write at the record-size limit and at the room left in this
  // segment after accounting for the header that precedes the data.
  const size_t max_write_size = std::min(
      kMaxRecordSize, alloc_size_ - end_offset_ - sizeof(RecordHeader));
  const size_t write_size = std::min(*data_size, max_write_size);
  const size_t record_size = CalculateRecordSize(write_size);
  DCHECK_LT(write_size, record_size);

  // Write the data and the record header.
  RecordHeader header;
  header.as_atomic = 0;
  header.as_parts.type = type;
  header.as_parts.amount = write_size;
  header.as_parts.continued = (write_size < *data_size);
  size_t offset = end_offset_;
  end_offset_ += record_size;
  DCHECK_GE(alloc_size_, end_offset_);
  if (end_offset_ < alloc_size_) {
    // An empty record header has to be next before this one gets written,
    // so a concurrent reader never interprets stale bytes as a valid header.
    base::subtle::NoBarrier_Store(
        reinterpret_cast<base::subtle::Atomic32*>(block + end_offset_), 0);
  }
  memcpy(block + offset + sizeof(header), *data, write_size);
  // Release-store the header last: a reader that Acquire_Loads it is then
  // guaranteed to also see the record's data bytes.
  base::subtle::Release_Store(
      reinterpret_cast<base::subtle::Atomic32*>(block + offset),
      header.as_atomic);

  // Account for what was stored and prepare for follow-on records with any
  // remaining data.
  *data += write_size;
  *data_size -= write_size;

  return true;
}
228 
// Reads the record at the current position, appending its payload to
// |record| (which may already hold data from earlier "continued" chunks).
// Returns true when a complete record — or end-of-data — has been reached;
// returns false when the record continues (caller keeps reading) or when a
// continuation's type does not match the record being assembled.
bool PersistentSystemProfile::RecordAllocator::ReadData(
    RecordType* type,
    std::string* record) const {
  DCHECK_GT(alloc_size_, end_offset_);

  char* block =
      allocator_->GetAsArray<char>(alloc_reference_, kTypeIdSystemProfile,
                                   base::PersistentMemoryAllocator::kSizeAny);
  if (!block) {
    *type = kUnusedSpace;
    return true;  // No more data.
  }

  // Get and validate the record header. The acquire-load pairs with the
  // writer's release-store, making the record's data bytes visible once the
  // header is.
  RecordHeader header;
  header.as_atomic = base::subtle::Acquire_Load(
      reinterpret_cast<base::subtle::Atomic32*>(block + end_offset_));
  bool continued = !!header.as_parts.continued;
  if (header.as_parts.type == kUnusedSpace) {
    *type = kUnusedSpace;
    return true;  // End of all records.
  } else if (*type == kUnusedSpace) {
    // First chunk of a record: adopt its type.
    *type = static_cast<RecordType>(header.as_parts.type);
  } else if (*type != header.as_parts.type) {
    DUMP_WILL_BE_NOTREACHED();  // Continuation didn't match start of
                                // record.
    *type = kUnusedSpace;
    record->clear();
    return false;
  }
  size_t read_size = header.as_parts.amount;
  // A payload that would run past the segment end means a corrupt header.
  if (end_offset_ + sizeof(header) + read_size > alloc_size_) {
#if !BUILDFLAG(IS_NACL)
    // TODO(crbug.com/40064026): Remove these. They are used to investigate
    // unexpected failures.
    SCOPED_CRASH_KEY_NUMBER("PersistentSystemProfile", "end_offset_",
                            end_offset_);
    SCOPED_CRASH_KEY_NUMBER("PersistentSystemProfile", "read_size", read_size);
    SCOPED_CRASH_KEY_NUMBER("PersistentSystemProfile", "alloc_size_",
                            alloc_size_);
#endif  // !BUILDFLAG(IS_NACL)

    DUMP_WILL_BE_NOTREACHED();  // Invalid header amount.
    *type = kUnusedSpace;
    return true;  // Don't try again.
  }

  // Append the record data to the output string.
  record->append(block + end_offset_ + sizeof(header), read_size);
  end_offset_ += CalculateRecordSize(read_size);
  DCHECK_GE(alloc_size_, end_offset_);

  return !continued;
}
283 
// Default construction/destruction: all members manage their own lifetimes.
PersistentSystemProfile::PersistentSystemProfile() = default;

PersistentSystemProfile::~PersistentSystemProfile() = default;
287 
RegisterPersistentAllocator(base::PersistentMemoryAllocator * memory_allocator)288 void PersistentSystemProfile::RegisterPersistentAllocator(
289     base::PersistentMemoryAllocator* memory_allocator) {
290   DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
291 
292   // Create and store the allocator. A |min_size| of "1" ensures that a memory
293   // block is reserved now.
294   RecordAllocator allocator(memory_allocator, 1);
295   allocators_.push_back(std::move(allocator));
296   all_have_complete_profile_ = false;
297 }
298 
DeregisterPersistentAllocator(base::PersistentMemoryAllocator * memory_allocator)299 void PersistentSystemProfile::DeregisterPersistentAllocator(
300     base::PersistentMemoryAllocator* memory_allocator) {
301   DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
302 
303   // This would be more efficient with a std::map but it's not expected that
304   // allocators will get deregistered with any frequency, if at all.
305   std::erase_if(allocators_, [=](RecordAllocator& records) {
306     return records.allocator() == memory_allocator;
307   });
308 }
309 
SetSystemProfile(const std::string & serialized_profile,bool complete)310 void PersistentSystemProfile::SetSystemProfile(
311     const std::string& serialized_profile,
312     bool complete) {
313   DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
314 
315   if (allocators_.empty() || serialized_profile.empty())
316     return;
317 
318   for (auto& allocator : allocators_) {
319     // Don't overwrite a complete profile with an incomplete one.
320     if (!complete && allocator.has_complete_profile())
321       continue;
322     // System profile always starts fresh.
323     allocator.Reset();
324     // Write out the serialized profile.
325     allocator.Write(kSystemProfileProto, serialized_profile);
326     // Indicate if this is a complete profile.
327     if (complete)
328       allocator.set_complete_profile();
329   }
330 
331   if (complete)
332     all_have_complete_profile_ = true;
333 }
334 
SetSystemProfile(const SystemProfileProto & profile,bool complete)335 void PersistentSystemProfile::SetSystemProfile(
336     const SystemProfileProto& profile,
337     bool complete) {
338   // Avoid serialization if passed profile is not complete and all allocators
339   // already have complete ones.
340   if (!complete && all_have_complete_profile_)
341     return;
342 
343   std::string serialized_profile;
344   if (!profile.SerializeToString(&serialized_profile))
345     return;
346   SetSystemProfile(serialized_profile, complete);
347 }
348 
AddFieldTrial(std::string_view trial,std::string_view group)349 void PersistentSystemProfile::AddFieldTrial(std::string_view trial,
350                                             std::string_view group) {
351   DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
352   DCHECK(!trial.empty());
353 
354   base::Pickle pickler;
355   pickler.WriteString(trial);
356   pickler.WriteString(group);
357 
358   WriteToAll(kFieldTrialInfo,
359              std::string_view(pickler.data_as_char(), pickler.size()));
360 }
361 
RemoveFieldTrial(std::string_view trial)362 void PersistentSystemProfile::RemoveFieldTrial(std::string_view trial) {
363   DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
364   DCHECK(!trial.empty());
365 
366   base::Pickle pickler;
367   pickler.WriteString(trial);
368   pickler.WriteString(kFieldTrialDeletionSentinel);
369 
370   WriteToAll(kFieldTrialInfo,
371              std::string_view(pickler.data_as_char(), pickler.size()));
372 }
373 // static
HasSystemProfile(const base::PersistentMemoryAllocator & memory_allocator)374 bool PersistentSystemProfile::HasSystemProfile(
375     const base::PersistentMemoryAllocator& memory_allocator) {
376   const RecordAllocator records(&memory_allocator);
377   return records.HasMoreData();
378 }
379 
380 // static
GetSystemProfile(const base::PersistentMemoryAllocator & memory_allocator,SystemProfileProto * system_profile)381 bool PersistentSystemProfile::GetSystemProfile(
382     const base::PersistentMemoryAllocator& memory_allocator,
383     SystemProfileProto* system_profile) {
384   const RecordAllocator records(&memory_allocator);
385 
386   RecordType type;
387   std::string record;
388   do {
389     if (!records.Read(&type, &record))
390       return false;
391   } while (type != kSystemProfileProto);
392 
393   if (!system_profile)
394     return true;
395 
396   if (!system_profile->ParseFromString(record))
397     return false;
398 
399   MergeUpdateRecords(memory_allocator, system_profile);
400   return true;
401 }
402 
// static
// Replays every "update" record (currently only field-trial changes) stored
// in |memory_allocator| on top of |system_profile|: later trial entries
// overwrite earlier ones, the empty-group sentinel deletes a trial, and the
// proto's field-trial list is rewritten deduplicated when anything changed.
void PersistentSystemProfile::MergeUpdateRecords(
    const base::PersistentMemoryAllocator& memory_allocator,
    SystemProfileProto* system_profile) {
  const RecordAllocator records(&memory_allocator);

  RecordType type;
  std::string record;
  // Latest known group for each trial, keyed by hashed trial name.
  std::map<uint32_t, uint32_t> field_trials;
  bool updated = false;

  // This is done separate from the code that gets the profile because it
  // compartmentalizes the code and makes it possible to reuse this section
  // should it be needed to merge "update" records into a new "complete"
  // system profile that somehow didn't get all the updates.
  while (records.Read(&type, &record)) {
    switch (type) {
      case kUnusedSpace:
        // These should never be returned.
        NOTREACHED();

      case kSystemProfileProto:
        // Profile was passed in; ignore this one.
        break;

      case kFieldTrialInfo: {
        // Get the set of known trial IDs so duplicates don't get added.
        // Lazily seeded on the first field-trial record encountered.
        if (field_trials.empty()) {
          for (int i = 0; i < system_profile->field_trial_size(); ++i) {
            field_trials[system_profile->field_trial(i).name_id()] =
                system_profile->field_trial(i).group_id();
          }
        }

        // Decode the pickled trial/group pair written by Add/RemoveFieldTrial.
        base::Pickle pickler =
            base::Pickle::WithUnownedBuffer(base::as_byte_span(record));
        base::PickleIterator iter(pickler);
        std::string_view trial;
        std::string_view group;
        if (iter.ReadStringPiece(&trial) && iter.ReadStringPiece(&group)) {
          variations::ActiveGroupId field_ids =
              variations::MakeActiveGroupId(trial, group);
          // The sentinel (empty group) removes the trial; anything else
          // records or overwrites the trial's latest group.
          if (group == kFieldTrialDeletionSentinel) {
            field_trials.erase(field_ids.name);
          } else {
            field_trials[field_ids.name] = field_ids.group;
          }
        }
        updated = true;
      } break;
    }
  }

  // Skip rewriting the field trials if there was no update.
  if (!updated) {
    return;
  }

  // Rewrite the full list of field trials to avoid duplicates.
  system_profile->clear_field_trial();

  for (const auto& trial : field_trials) {
    SystemProfileProto::FieldTrial* field_trial =
        system_profile->add_field_trial();
    field_trial->set_name_id(trial.first);
    field_trial->set_group_id(trial.second);
  }
}
471 
WriteToAll(RecordType type,std::string_view record)472 void PersistentSystemProfile::WriteToAll(RecordType type,
473                                          std::string_view record) {
474   for (auto& allocator : allocators_)
475     allocator.Write(type, record);
476 }
477 
GetInstance()478 GlobalPersistentSystemProfile* GlobalPersistentSystemProfile::GetInstance() {
479   return base::Singleton<
480       GlobalPersistentSystemProfile,
481       base::LeakySingletonTraits<GlobalPersistentSystemProfile>>::get();
482 }
483 
484 }  // namespace metrics
485