• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright 2020 The Pigweed Authors
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License"); you may not
4 // use this file except in compliance with the License. You may obtain a copy of
5 // the License at
6 //
7 //     https://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11 // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12 // License for the specific language governing permissions and limitations under
13 // the License.
14 
15 #define PW_LOG_MODULE_NAME "KVS"
16 #define PW_LOG_LEVEL PW_KVS_LOG_LEVEL
17 
18 #include "pw_kvs/key_value_store.h"
19 
20 #include <algorithm>
21 #include <cinttypes>
22 #include <cstring>
23 #include <type_traits>
24 
25 #include "pw_assert/check.h"
26 #include "pw_kvs_private/config.h"
27 #include "pw_log/shorter.h"
28 #include "pw_status/try.h"
29 
30 namespace pw::kvs {
31 namespace {
32 
33 using std::byte;
34 
InvalidKey(Key key)35 constexpr bool InvalidKey(Key key) {
36   return key.empty() || (key.size() > internal::Entry::kMaxKeyLength);
37 }
38 
39 }  // namespace
40 
// Constructs a KeyValueStore over the given flash partition. The caller
// supplies all backing storage (sector descriptors, key descriptors, scratch
// addresses) so the KVS itself performs no dynamic allocation; that storage
// must remain valid for the KVS's lifetime. partition must not be null -- it
// is dereferenced and stored by reference. The store starts in the
// kNotInitialized state; Init() must be called before use.
KeyValueStore::KeyValueStore(FlashPartition* partition,
                             std::span<const EntryFormat> formats,
                             const Options& options,
                             size_t redundancy,
                             Vector<SectorDescriptor>& sector_descriptor_list,
                             const SectorDescriptor** temp_sectors_to_skip,
                             Vector<KeyDescriptor>& key_descriptor_list,
                             Address* addresses)
    : partition_(*partition),
      formats_(formats),
      sectors_(sector_descriptor_list, *partition, temp_sectors_to_skip),
      entry_cache_(key_descriptor_list, addresses, redundancy),
      options_(options),
      initialized_(InitializationState::kNotInitialized),
      error_detected_(false),
      internal_stats_({}),
      last_transaction_id_(0) {}
58 
Init()59 Status KeyValueStore::Init() {
60   initialized_ = InitializationState::kNotInitialized;
61   error_detected_ = false;
62   last_transaction_id_ = 0;
63 
64   INF("Initializing key value store");
65   if (partition_.sector_count() > sectors_.max_size()) {
66     ERR("KVS init failed: kMaxUsableSectors (=%u) must be at least as "
67         "large as the number of sectors in the flash partition (=%u)",
68         unsigned(sectors_.max_size()),
69         unsigned(partition_.sector_count()));
70     return Status::FailedPrecondition();
71   }
72 
73   if (partition_.sector_count() < 2) {
74     ERR("KVS init failed: FlashParition sector count (=%u) must be at 2. KVS "
75         "requires at least 1 working sector + 1 free/reserved sector",
76         unsigned(partition_.sector_count()));
77     return Status::FailedPrecondition();
78   }
79 
80   const size_t sector_size_bytes = partition_.sector_size_bytes();
81 
82   // TODO: investigate doing this as a static assert/compile-time check.
83   if (sector_size_bytes > SectorDescriptor::max_sector_size()) {
84     ERR("KVS init failed: sector_size_bytes (=%u) is greater than maximum "
85         "allowed sector size (=%u)",
86         unsigned(sector_size_bytes),
87         unsigned(SectorDescriptor::max_sector_size()));
88     return Status::FailedPrecondition();
89   }
90 
91   Status metadata_result = InitializeMetadata();
92 
93   if (!error_detected_) {
94     initialized_ = InitializationState::kReady;
95   } else {
96     initialized_ = InitializationState::kNeedsMaintenance;
97 
98     if (options_.recovery != ErrorRecovery::kManual) {
99       size_t pre_fix_redundancy_errors =
100           internal_stats_.missing_redundant_entries_recovered;
101       Status recovery_status = FixErrors();
102 
103       if (recovery_status.ok()) {
104         if (metadata_result.IsOutOfRange()) {
105           internal_stats_.missing_redundant_entries_recovered =
106               pre_fix_redundancy_errors;
107           INF("KVS init: Redundancy level successfully updated");
108         } else {
109           WRN("KVS init: Corruption detected and fully repaired");
110         }
111         initialized_ = InitializationState::kReady;
112       } else if (recovery_status.IsResourceExhausted()) {
113         WRN("KVS init: Unable to maintain required free sector");
114       } else {
115         WRN("KVS init: Corruption detected and unable repair");
116       }
117     } else {
118       WRN("KVS init: Corruption detected, no repair attempted due to options");
119     }
120   }
121 
122   INF("KeyValueStore init complete: active keys %u, deleted keys %u, sectors "
123       "%u, logical sector size %u bytes",
124       unsigned(size()),
125       unsigned(entry_cache_.total_entries() - size()),
126       unsigned(sectors_.size()),
127       unsigned(partition_.sector_size_bytes()));
128 
129   // Report any corruption was not repaired.
130   if (error_detected_) {
131     WRN("KVS init: Corruption found but not repaired, KVS unavailable until "
132         "successful maintenance.");
133     return Status::DataLoss();
134   }
135 
136   return OkStatus();
137 }
138 
// Rebuilds the sector and entry-cache metadata by scanning every sector of
// the partition in two passes. Sets error_detected_ if corruption, missing
// redundant copies, or the lack of an empty sector is found.
//
//                  OK: Metadata rebuilt; no errors found.
//        OUT_OF_RANGE: Every entry is missing redundant copies and there are
//                      no other errors -- interpreted by Init() as a
//                      redundancy configuration change rather than corruption.
// FAILED_PRECONDITION: Corruption or missing redundant copies were detected.
Status KeyValueStore::InitializeMetadata() {
  const size_t sector_size_bytes = partition_.sector_size_bytes();

  sectors_.Reset();
  entry_cache_.Reset();

  DBG("First pass: Read all entries from all sectors");
  Address sector_address = 0;

  size_t total_corrupt_bytes = 0;
  size_t corrupt_entries = 0;
  bool empty_sector_found = false;
  size_t entry_copies_missing = 0;

  for (SectorDescriptor& sector : sectors_) {
    Address entry_address = sector_address;

    size_t sector_corrupt_bytes = 0;

    // Walk entries front-to-back until the sector ends or un-written flash is
    // reached.
    for (int num_entries_in_sector = 0; true; num_entries_in_sector++) {
      DBG("Load entry: sector=%u, entry#=%d, address=%u",
          unsigned(sector_address),
          num_entries_in_sector,
          unsigned(entry_address));

      if (!sectors_.AddressInSector(sector, entry_address)) {
        DBG("Fell off end of sector; moving to the next sector");
        break;
      }

      Address next_entry_address;
      Status status = LoadEntry(entry_address, &next_entry_address);
      if (status.IsNotFound()) {
        DBG("Hit un-written data in sector; moving to the next sector");
        break;
      } else if (!status.ok()) {
        // The entry could not be read, indicating likely data corruption within
        // the sector. Try to scan the remainder of the sector for other
        // entries.

        error_detected_ = true;
        corrupt_entries++;

        status = ScanForEntry(sector,
                              entry_address + Entry::kMinAlignmentBytes,
                              &next_entry_address);
        if (!status.ok()) {
          // No further entries in this sector. Mark the remaining bytes in the
          // sector as corrupt (since we can't reliably know the size of the
          // corrupt entry).
          sector_corrupt_bytes +=
              sector_size_bytes - (entry_address - sector_address);
          break;
        }

        sector_corrupt_bytes += next_entry_address - entry_address;
      }

      // Entry loaded successfully; so get ready to load the next one.
      entry_address = next_entry_address;

      // Update the number of writable bytes in this sector.
      sector.set_writable_bytes(sector_size_bytes -
                                (entry_address - sector_address));
    }

    if (sector_corrupt_bytes > 0) {
      // If the sector contains corrupt data, prevent any further entries from
      // being written to it by indicating that it has no space. This should
      // also make it a decent GC candidate. Valid keys in the sector are still
      // readable as normal.
      sector.mark_corrupt();
      error_detected_ = true;

      WRN("Sector %u contains %uB of corrupt data",
          sectors_.Index(sector),
          unsigned(sector_corrupt_bytes));
    }

    if (sector.Empty(sector_size_bytes)) {
      empty_sector_found = true;
    }
    sector_address += sector_size_bytes;
    total_corrupt_bytes += sector_corrupt_bytes;
  }

  DBG("Second pass: Count valid bytes in each sector");
  Address newest_key = 0;

  // For every valid entry, for each address, count the valid bytes in that
  // sector. If the address fails to read, remove the address and mark the
  // sector as corrupt. Track which entry has the newest transaction ID for
  // initializing last_new_sector_.
  for (EntryMetadata& metadata : entry_cache_) {
    if (metadata.addresses().size() < redundancy()) {
      DBG("Key 0x%08x missing copies, has %u, needs %u",
          unsigned(metadata.hash()),
          unsigned(metadata.addresses().size()),
          unsigned(redundancy()));
      entry_copies_missing++;
    }
    size_t index = 0;
    while (index < metadata.addresses().size()) {
      Address address = metadata.addresses()[index];
      Entry entry;

      Status read_result = Entry::Read(partition_, address, formats_, &entry);

      SectorDescriptor& sector = sectors_.FromAddress(address);

      if (read_result.ok()) {
        sector.AddValidBytes(entry.size());
        index++;
      } else {
        corrupt_entries++;
        total_corrupt_bytes += sector.writable_bytes();
        error_detected_ = true;
        sector.mark_corrupt();

        // Remove the bad address and stay at this index. The removal
        // replaces out the removed address with the back address so
        // this index needs to be rechecked with the new address.
        metadata.RemoveAddress(address);
      }
    }

    if (metadata.IsNewerThan(last_transaction_id_)) {
      last_transaction_id_ = metadata.transaction_id();
      newest_key = metadata.addresses().back();
    }
  }

  sectors_.set_last_new_sector(newest_key);

  if (!empty_sector_found) {
    // The KVS requires a free sector for garbage collection.
    DBG("No empty sector found");
    error_detected_ = true;
  }

  if (entry_copies_missing > 0) {
    bool other_errors = error_detected_;
    error_detected_ = true;

    if (!other_errors && entry_copies_missing == entry_cache_.total_entries()) {
      INF("KVS configuration changed to redundancy of %u total copies per key",
          unsigned(redundancy()));
      return Status::OutOfRange();
    }
  }

  if (error_detected_) {
    WRN("Corruption detected. Found %u corrupt bytes, %u corrupt entries, "
        "and %u keys missing redundant copies.",
        unsigned(total_corrupt_bytes),
        unsigned(corrupt_entries),
        unsigned(entry_copies_missing));
    return Status::FailedPrecondition();
  }
  return OkStatus();
}
299 
GetStorageStats() const300 KeyValueStore::StorageStats KeyValueStore::GetStorageStats() const {
301   StorageStats stats{};
302   const size_t sector_size = partition_.sector_size_bytes();
303   bool found_empty_sector = false;
304   stats.sector_erase_count = internal_stats_.sector_erase_count;
305   stats.corrupt_sectors_recovered = internal_stats_.corrupt_sectors_recovered;
306   stats.missing_redundant_entries_recovered =
307       internal_stats_.missing_redundant_entries_recovered;
308 
309   for (const SectorDescriptor& sector : sectors_) {
310     stats.in_use_bytes += sector.valid_bytes();
311     stats.reclaimable_bytes += sector.RecoverableBytes(sector_size);
312 
313     if (!found_empty_sector && sector.Empty(sector_size)) {
314       // The KVS tries to always keep an empty sector for GC, so don't count
315       // the first empty sector seen as writable space. However, a free sector
316       // cannot always be assumed to exist; if a GC operation fails, all sectors
317       // may be partially written, in which case the space reported might be
318       // inaccurate.
319       found_empty_sector = true;
320       continue;
321     }
322 
323     stats.writable_bytes += sector.writable_bytes();
324   }
325 
326   return stats;
327 }
328 
329 // Check KVS for any error conditions. Primarily intended for test and
330 // internal use.
CheckForErrors()331 bool KeyValueStore::CheckForErrors() {
332   // Check for corrupted sectors
333   for (SectorDescriptor& sector : sectors_) {
334     if (sector.corrupt()) {
335       error_detected_ = true;
336       return error_detected();
337     }
338   }
339 
340   // Check for missing redundancy.
341   if (redundancy() > 1) {
342     for (const EntryMetadata& metadata : entry_cache_) {
343       if (metadata.addresses().size() < redundancy()) {
344         error_detected_ = true;
345         return error_detected();
346       }
347     }
348   }
349 
350   return error_detected();
351 }
352 
// Reads and validates the entry at entry_address and records it in the entry
// cache. On success, *next_entry_address is set to the address immediately
// following this entry. Propagates NOT_FOUND from Entry::Read (treated by the
// caller as un-written flash) and any read/verification failure.
Status KeyValueStore::LoadEntry(Address entry_address,
                                Address* next_entry_address) {
  Entry entry;
  PW_TRY(Entry::Read(partition_, entry_address, formats_, &entry));

  // Read the key from flash & validate the entry (which reads the value).
  Entry::KeyBuffer key_buffer;
  PW_TRY_ASSIGN(size_t key_length, entry.ReadKey(key_buffer));
  const Key key(key_buffer.data(), key_length);

  PW_TRY(entry.VerifyChecksumInFlash());

  // A valid entry was found, so update the next entry address before doing any
  // of the checks that happen in AddNewOrUpdateExisting.
  *next_entry_address = entry.next_address();
  return entry_cache_.AddNewOrUpdateExisting(
      entry.descriptor(key), entry.address(), partition_.sector_size_bytes());
}
371 
// Scans flash memory within a sector to find a KVS entry magic.
//
//        OK: A known magic value was found; *next_entry_address points at it.
// NOT_FOUND: No known magic exists from start_address to the end of sector.
Status KeyValueStore::ScanForEntry(const SectorDescriptor& sector,
                                   Address start_address,
                                   Address* next_entry_address) {
  DBG("Scanning sector %u for entries starting from address %u",
      sectors_.Index(sector),
      unsigned(start_address));

  // Entries must start at addresses which are aligned on a multiple of
  // Entry::kMinAlignmentBytes. However, that multiple can vary between entries.
  // When scanning, we don't have an entry to tell us what the current alignment
  // is, so the minimum alignment is used to be exhaustive.
  for (Address address = AlignUp(start_address, Entry::kMinAlignmentBytes);
       sectors_.AddressInSector(sector, address);
       address += Entry::kMinAlignmentBytes) {
    uint32_t magic;
    StatusWithSize read_result =
        partition_.Read(address, std::as_writable_bytes(std::span(&magic, 1)));
    if (!read_result.ok()) {
      // Unreadable word; skip it and keep scanning.
      continue;
    }
    if (formats_.KnownMagic(magic)) {
      DBG("Found entry magic at address %u", unsigned(address));
      *next_entry_address = address;
      return OkStatus();
    }
  }

  return Status::NotFound();
}
402 
// Reads the value stored for key into value_buffer, starting at offset_bytes
// within the stored value. Returns the number of bytes read on success;
// NOT_FOUND if the key is absent or deleted; INVALID_ARGUMENT for a bad key;
// FAILED_PRECONDITION if the KVS has not been initialized.
StatusWithSize KeyValueStore::Get(Key key,
                                  std::span<byte> value_buffer,
                                  size_t offset_bytes) const {
  PW_TRY_WITH_SIZE(CheckReadOperation(key));

  EntryMetadata metadata;
  PW_TRY_WITH_SIZE(FindExisting(key, &metadata));

  return Get(key, metadata, value_buffer, offset_bytes);
}
413 
// Writes or overwrites the value for the given key.
//
//                  OK: Entry written.
//    INVALID_ARGUMENT: Key is invalid, or key + value do not fit in a sector.
// FAILED_PRECONDITION: KVS is not fully initialized.
Status KeyValueStore::PutBytes(Key key, std::span<const byte> value) {
  PW_TRY(CheckWriteOperation(key));
  DBG("Writing key/value; key length=%u, value length=%u",
      unsigned(key.size()),
      unsigned(value.size()));

  // A complete entry (header, key, and value) must fit in a single sector.
  if (Entry::size(partition_, key, value) > partition_.sector_size_bytes()) {
    DBG("%u B value with %u B key cannot fit in one sector",
        unsigned(value.size()),
        unsigned(key.size()));
    return Status::InvalidArgument();
  }

  EntryMetadata metadata;
  Status status = FindEntry(key, &metadata);

  if (status.ok()) {
    // The key already exists (possibly as a tombstone); overwrite it.
    // TODO: figure out logging how to support multiple addresses.
    DBG("Overwriting entry for key 0x%08x in %u sectors including %u",
        unsigned(metadata.hash()),
        unsigned(metadata.addresses().size()),
        sectors_.Index(metadata.first_address()));
    return WriteEntryForExistingKey(metadata, EntryState::kValid, key, value);
  }

  // NOT_FOUND from FindEntry means this is a brand new key.
  if (status.IsNotFound()) {
    return WriteEntryForNewKey(key, value);
  }

  return status;
}
445 
// Removes the entry for the given key by writing a tombstone (kDeleted) entry
// with an empty value. Returns NOT_FOUND if the key does not exist or is
// already deleted.
Status KeyValueStore::Delete(Key key) {
  PW_TRY(CheckWriteOperation(key));

  EntryMetadata metadata;
  PW_TRY(FindExisting(key, &metadata));

  // TODO: figure out logging how to support multiple addresses.
  DBG("Writing tombstone for key 0x%08x in %u sectors including %u",
      unsigned(metadata.hash()),
      unsigned(metadata.addresses().size()),
      sectors_.Index(metadata.first_address()));
  return WriteEntryForExistingKey(metadata, EntryState::kDeleted, key, {});
}
459 
// Caches the current iterator entry's key in key_buffer_. If the entry cannot
// be read, the buffer is left zero-filled (an empty key).
void KeyValueStore::Item::ReadKey() {
  key_buffer_.fill('\0');

  Entry entry;
  if (kvs_.ReadEntry(*iterator_, entry).ok()) {
    entry.ReadKey(key_buffer_)
        .IgnoreError();  // TODO(pwbug/387): Handle Status properly
  }
}
469 
operator ++()470 KeyValueStore::iterator& KeyValueStore::iterator::operator++() {
471   // Skip to the next entry that is valid (not deleted).
472   while (++item_.iterator_ != item_.kvs_.entry_cache_.end() &&
473          item_.iterator_->state() != EntryState::kValid) {
474   }
475   return *this;
476 }
477 
begin() const478 KeyValueStore::iterator KeyValueStore::begin() const {
479   internal::EntryCache::const_iterator cache_iterator = entry_cache_.begin();
480   // Skip over any deleted entries at the start of the descriptor list.
481   while (cache_iterator != entry_cache_.end() &&
482          cache_iterator->state() != EntryState::kValid) {
483     ++cache_iterator;
484   }
485   return iterator(*this, cache_iterator);
486 }
487 
// Returns the size in bytes of the value stored for key, without reading the
// value itself. NOT_FOUND if the key is absent or deleted.
StatusWithSize KeyValueStore::ValueSize(Key key) const {
  PW_TRY_WITH_SIZE(CheckReadOperation(key));

  EntryMetadata metadata;
  PW_TRY_WITH_SIZE(FindExisting(key, &metadata));

  return ValueSize(metadata);
}
496 
// Reads an entry from flash, trying each of its redundant addresses in turn
// until one reads successfully. Each address that fails to read has its
// sector marked corrupt and sets error_detected_. If no copy can be read,
// returns the last read error (DATA_LOSS if there were no addresses at all).
Status KeyValueStore::ReadEntry(const EntryMetadata& metadata,
                                Entry& entry) const {
  // Try to read an entry
  Status read_result = Status::DataLoss();
  for (Address address : metadata.addresses()) {
    read_result = Entry::Read(partition_, address, formats_, &entry);
    if (read_result.ok()) {
      return read_result;
    }

    // Found a bad address. Set the sector as corrupt.
    error_detected_ = true;
    sectors_.FromAddress(address).mark_corrupt();
  }

  ERR("No valid entries for key. Data has been lost!");
  return read_result;
}
515 
FindEntry(Key key,EntryMetadata * metadata_out) const516 Status KeyValueStore::FindEntry(Key key, EntryMetadata* metadata_out) const {
517   StatusWithSize find_result =
518       entry_cache_.Find(partition_, sectors_, formats_, key, metadata_out);
519 
520   if (find_result.size() > 0u) {
521     error_detected_ = true;
522   }
523   return find_result.status();
524 }
525 
// Looks up the key, treating hash collisions and deleted (tombstoned) entries
// as NOT_FOUND. Use this for operations that require a live entry.
Status KeyValueStore::FindExisting(Key key, EntryMetadata* metadata_out) const {
  Status status = FindEntry(key, metadata_out);

  // If the key's hash collides with an existing key or if the key is deleted,
  // treat it as if it is not in the KVS.
  if (status.IsAlreadyExists() ||
      (status.ok() && metadata_out->state() == EntryState::kDeleted)) {
    return Status::NotFound();
  }
  return status;
}
537 
// Reads the value for an already-located entry into value_buffer, starting at
// offset_bytes. When verify_on_read is enabled and the read starts at offset
// 0, the checksum is verified against the bytes read; on verification failure
// the buffer is zeroed and the failure status is returned with size 0.
StatusWithSize KeyValueStore::Get(Key key,
                                  const EntryMetadata& metadata,
                                  std::span<std::byte> value_buffer,
                                  size_t offset_bytes) const {
  Entry entry;

  PW_TRY_WITH_SIZE(ReadEntry(metadata, entry));

  StatusWithSize result = entry.ReadValue(value_buffer, offset_bytes);
  if (result.ok() && options_.verify_on_read && offset_bytes == 0u) {
    Status verify_result =
        entry.VerifyChecksum(key, value_buffer.first(result.size()));
    if (!verify_result.ok()) {
      // Do not expose unverified data to the caller.
      std::memset(value_buffer.data(), 0, result.size());
      return StatusWithSize(verify_result, 0);
    }

    return StatusWithSize(verify_result, result.size());
  }
  return result;
}
559 
FixedSizeGet(Key key,void * value,size_t size_bytes) const560 Status KeyValueStore::FixedSizeGet(Key key,
561                                    void* value,
562                                    size_t size_bytes) const {
563   PW_TRY(CheckWriteOperation(key));
564 
565   EntryMetadata metadata;
566   PW_TRY(FindExisting(key, &metadata));
567 
568   return FixedSizeGet(key, metadata, value, size_bytes);
569 }
570 
// Reads the value for an already-located entry into value, requiring the
// stored size to exactly equal size_bytes. Returns INVALID_ARGUMENT on a size
// mismatch; otherwise the status of the underlying Get().
Status KeyValueStore::FixedSizeGet(Key key,
                                   const EntryMetadata& metadata,
                                   void* value,
                                   size_t size_bytes) const {
  // Ensure that the size of the stored value matches the size of the type.
  // Otherwise, report error. This check avoids potential memory corruption.
  PW_TRY_ASSIGN(const size_t actual_size, ValueSize(metadata));

  if (actual_size != size_bytes) {
    DBG("Requested %u B read, but value is %u B",
        unsigned(size_bytes),
        unsigned(actual_size));
    return Status::InvalidArgument();
  }

  StatusWithSize result =
      Get(key, metadata, std::span(static_cast<byte*>(value), size_bytes), 0);

  return result.status();
}
591 
// Returns the stored value's size in bytes for an already-located entry,
// reading the entry header from flash to get it.
StatusWithSize KeyValueStore::ValueSize(const EntryMetadata& metadata) const {
  Entry entry;
  PW_TRY_WITH_SIZE(ReadEntry(metadata, entry));

  return StatusWithSize(entry.value_size());
}
598 
CheckWriteOperation(Key key) const599 Status KeyValueStore::CheckWriteOperation(Key key) const {
600   if (InvalidKey(key)) {
601     return Status::InvalidArgument();
602   }
603 
604   // For normal write operation the KVS must be fully ready.
605   if (!initialized()) {
606     return Status::FailedPrecondition();
607   }
608   return OkStatus();
609 }
610 
CheckReadOperation(Key key) const611 Status KeyValueStore::CheckReadOperation(Key key) const {
612   if (InvalidKey(key)) {
613     return Status::InvalidArgument();
614   }
615 
616   // Operations that are explicitly read-only can be done after init() has been
617   // called but not fully ready (when needing maintenance).
618   if (initialized_ == InitializationState::kNotInitialized) {
619     return Status::FailedPrecondition();
620   }
621   return OkStatus();
622 }
623 
// Writes a new entry (valid or tombstone) for a key that already has a
// descriptor, first reading the original entry so its size can be used for
// sector accounting.
Status KeyValueStore::WriteEntryForExistingKey(EntryMetadata& metadata,
                                               EntryState new_state,
                                               Key key,
                                               std::span<const byte> value) {
  // Read the original entry to get the size for sector accounting purposes.
  Entry entry;
  PW_TRY(ReadEntry(metadata, entry));

  return WriteEntry(key, value, new_state, &metadata, &entry);
}
634 
// Writes the first entry for a brand new key. Returns RESOURCE_EXHAUSTED if
// the entry cache has no room for another key descriptor.
Status KeyValueStore::WriteEntryForNewKey(Key key,
                                          std::span<const byte> value) {
  if (entry_cache_.full()) {
    WRN("KVS full: trying to store a new entry, but can't. Have %u entries",
        unsigned(entry_cache_.total_entries()));
    return Status::ResourceExhausted();
  }

  return WriteEntry(key, value, EntryState::kValid);
}
645 
// Writes an entry (and its redundant copies) for the key/value pair, finding
// space -- garbage collecting if necessary -- for each copy. For overwrites,
// prior_metadata/prior_entry carry the existing entry so its accounting can
// be updated; a write whose value and state match the prior entry is skipped.
Status KeyValueStore::WriteEntry(Key key,
                                 std::span<const byte> value,
                                 EntryState new_state,
                                 EntryMetadata* prior_metadata,
                                 const Entry* prior_entry) {
  // If new entry and prior entry have matching value size, state, and checksum,
  // check if the values match. Directly compare the prior and new values
  // because the checksum can not be depended on to establish equality, it can
  // only be depended on to establish inequality.
  if (prior_entry != nullptr && prior_entry->value_size() == value.size() &&
      prior_metadata->state() == new_state &&
      prior_entry->ValueMatches(value).ok()) {
    // The new value matches the prior value, don't need to write anything. Just
    // keep the existing entry.
    DBG("Write for key 0x%08x with matching value skipped",
        unsigned(prior_metadata->hash()));
    return OkStatus();
  }

  // List of addresses for sectors with space for this entry.
  Address* reserved_addresses = entry_cache_.TempReservedAddressesForWrite();

  // Find addresses to write the entry to. This may involve garbage collecting
  // one or more sectors.
  const size_t entry_size = Entry::size(partition_, key, value);
  PW_TRY(GetAddressesForWrite(reserved_addresses, entry_size));

  // Write the entry at the first address that was found.
  Entry entry = CreateEntry(reserved_addresses[0], key, value, new_state);
  PW_TRY(AppendEntry(entry, key, value));

  // After writing the first entry successfully, update the key descriptors.
  // Once a single new entry is written, the old entries are invalidated.
  size_t prior_size = prior_entry != nullptr ? prior_entry->size() : 0;
  EntryMetadata new_metadata =
      CreateOrUpdateKeyDescriptor(entry, key, prior_metadata, prior_size);

  // Write the additional copies of the entry, if redundancy is greater than 1.
  for (size_t i = 1; i < redundancy(); ++i) {
    entry.set_address(reserved_addresses[i]);
    PW_TRY(AppendEntry(entry, key, value));
    new_metadata.AddNewAddress(reserved_addresses[i]);
  }
  return OkStatus();
}
691 
CreateOrUpdateKeyDescriptor(const Entry & entry,Key key,EntryMetadata * prior_metadata,size_t prior_size)692 KeyValueStore::EntryMetadata KeyValueStore::CreateOrUpdateKeyDescriptor(
693     const Entry& entry,
694     Key key,
695     EntryMetadata* prior_metadata,
696     size_t prior_size) {
697   // If there is no prior descriptor, create a new one.
698   if (prior_metadata == nullptr) {
699     return entry_cache_.AddNew(entry.descriptor(key), entry.address());
700   }
701 
702   return UpdateKeyDescriptor(
703       entry, entry.address(), prior_metadata, prior_size);
704 }
705 
// Points an existing key descriptor at a newly-written entry, removing the
// now-stale prior copies' bytes from their sectors' valid-byte counts.
KeyValueStore::EntryMetadata KeyValueStore::UpdateKeyDescriptor(
    const Entry& entry,
    Address new_address,
    EntryMetadata* prior_metadata,
    size_t prior_size) {
  // Remove valid bytes for the old entry and its copies, which are now stale.
  for (Address address : prior_metadata->addresses()) {
    sectors_.FromAddress(address).RemoveValidBytes(prior_size);
  }

  prior_metadata->Reset(entry.descriptor(prior_metadata->hash()), new_address);
  return *prior_metadata;
}
719 
// Finds one writable address per redundant copy (redundancy() total) for an
// entry of write_size bytes, filling write_addresses. Earlier reservations
// are passed to GetSectorForWrite so each copy lands in a distinct sector.
Status KeyValueStore::GetAddressesForWrite(Address* write_addresses,
                                           size_t write_size) {
  for (size_t i = 0; i < redundancy(); i++) {
    SectorDescriptor* sector;
    PW_TRY(
        GetSectorForWrite(&sector, write_size, std::span(write_addresses, i)));
    write_addresses[i] = sectors_.NextWritableAddress(*sector);

    DBG("Found space for entry in sector %u at address %u",
        sectors_.Index(sector),
        unsigned(write_addresses[i]));
  }

  return OkStatus();
}
735 
// Finds a sector to use for writing a new entry to. Does automatic garbage
// collection if needed and allowed.
//
//                 OK: Sector found with needed space.
// RESOURCE_EXHAUSTED: No sector available with the needed space.
Status KeyValueStore::GetSectorForWrite(
    SectorDescriptor** sector,
    size_t entry_size,
    std::span<const Address> reserved_addresses) {
  Status result = sectors_.FindSpace(sector, entry_size, reserved_addresses);

  size_t gc_sector_count = 0;
  // NOTE: "GargbageCollectOnWrite" is the option type's declared spelling.
  bool do_auto_gc = options_.gc_on_write != GargbageCollectOnWrite::kDisabled;

  // Do garbage collection as needed, so long as policy allows.
  while (result.IsResourceExhausted() && do_auto_gc) {
    if (options_.gc_on_write == GargbageCollectOnWrite::kOneSector) {
      // If GC config option is kOneSector clear the flag to not do any more
      // GC after this try.
      do_auto_gc = false;
    }
    // Garbage collect and then try again to find the best sector.
    Status gc_status = GarbageCollect(reserved_addresses);
    if (!gc_status.ok()) {
      if (gc_status.IsNotFound()) {
        // Not enough space, and no reclaimable bytes, this KVS is full!
        return Status::ResourceExhausted();
      }
      return gc_status;
    }

    result = sectors_.FindSpace(sector, entry_size, reserved_addresses);

    gc_sector_count++;
    // Allow total sectors + 2 number of GC cycles so that once reclaimable
    // bytes in all the sectors have been reclaimed can try and free up space by
    // moving entries for keys other than the one being worked on in to sectors
    // that have copies of the key trying to be written.
    if (gc_sector_count > (partition_.sector_count() + 2)) {
      ERR("Did more GC sectors than total sectors!!!!");
      return Status::ResourceExhausted();
    }
  }

  if (!result.ok()) {
    WRN("Unable to find sector to write %u B", unsigned(entry_size));
  }
  return result;
}
785 
MarkSectorCorruptIfNotOk(Status status,SectorDescriptor * sector)786 Status KeyValueStore::MarkSectorCorruptIfNotOk(Status status,
787                                                SectorDescriptor* sector) {
788   if (!status.ok()) {
789     DBG("  Sector %u corrupt", sectors_.Index(sector));
790     sector->mark_corrupt();
791     error_detected_ = true;
792   }
793   return status;
794 }
795 
AppendEntry(const Entry & entry,Key key,std::span<const byte> value)796 Status KeyValueStore::AppendEntry(const Entry& entry,
797                                   Key key,
798                                   std::span<const byte> value) {
799   const StatusWithSize result = entry.Write(key, value);
800 
801   SectorDescriptor& sector = sectors_.FromAddress(entry.address());
802 
803   if (!result.ok()) {
804     ERR("Failed to write %u bytes at %#x. %u actually written",
805         unsigned(entry.size()),
806         unsigned(entry.address()),
807         unsigned(result.size()));
808     PW_TRY(MarkSectorCorruptIfNotOk(result.status(), &sector));
809   }
810 
811   if (options_.verify_on_write) {
812     PW_TRY(MarkSectorCorruptIfNotOk(entry.VerifyChecksumInFlash(), &sector));
813   }
814 
815   sector.RemoveWritableBytes(result.size());
816   sector.AddValidBytes(result.size());
817   return OkStatus();
818 }
819 
// Copies `entry` to `new_address` in `new_sector` and updates the sector's
// byte accounting. Returns the number of bytes written on success. On a write
// or verification failure, the sector is marked corrupt and the failing
// status is returned.
StatusWithSize KeyValueStore::CopyEntryToSector(Entry& entry,
                                                SectorDescriptor* new_sector,
                                                Address new_address) {
  const StatusWithSize result = entry.Copy(new_address);

  PW_TRY_WITH_SIZE(MarkSectorCorruptIfNotOk(result.status(), new_sector));

  if (options_.verify_on_write) {
    // Re-read the entry from its new location so the checksum is verified
    // against what actually landed in flash, not the in-memory copy.
    Entry new_entry;
    PW_TRY_WITH_SIZE(MarkSectorCorruptIfNotOk(
        Entry::Read(partition_, new_address, formats_, &new_entry),
        new_sector));
    // TODO: add test that catches doing the verify on the old entry.
    PW_TRY_WITH_SIZE(MarkSectorCorruptIfNotOk(new_entry.VerifyChecksumInFlash(),
                                              new_sector));
  }
  // Entry was written successfully; update descriptor's address and the sector
  // descriptors to reflect the new entry.
  new_sector->RemoveWritableBytes(result.size());
  new_sector->AddValidBytes(result.size());

  return result;
}
843 
// Moves the entry copy stored at `address` to a newly chosen sector, updating
// `address` in place to the new location. Used during garbage collection.
Status KeyValueStore::RelocateEntry(
    const EntryMetadata& metadata,
    KeyValueStore::Address& address,
    std::span<const Address> reserved_addresses) {
  Entry entry;
  PW_TRY(ReadEntry(metadata, entry));

  // Find a new sector for the entry and write it to the new location. For
  // relocation the find should not be a sector already containing the key
  // but can be the always empty sector, since this is part of the GC process
  // that will result in a new empty sector. Also find a sector that does not
  // have reclaimable space (mostly for the full GC, where that would result in
  // an immediate extra relocation).
  SectorDescriptor* new_sector;

  PW_TRY(sectors_.FindSpaceDuringGarbageCollection(
      &new_sector, entry.size(), metadata.addresses(), reserved_addresses));

  Address new_address = sectors_.NextWritableAddress(*new_sector);
  PW_TRY_ASSIGN(const size_t result_size,
                CopyEntryToSector(entry, new_sector, new_address));
  // The old copy is now stale; drop its bytes from the old sector's valid
  // count so the sector can be reclaimed later.
  sectors_.FromAddress(address).RemoveValidBytes(result_size);
  address = new_address;

  return OkStatus();
}
870 
// Shared implementation behind the full-maintenance entry points: repairs any
// detected errors, migrates entries to the primary format, then garbage
// collects sectors with reclaimable space.
Status KeyValueStore::FullMaintenanceHelper(MaintenanceType maintenance_type) {
  if (initialized_ == InitializationState::kNotInitialized) {
    return Status::FailedPrecondition();
  }

  // Full maintenance can be a potentially heavy operation, and should be
  // relatively infrequent, so log start/end at INFO level.
  INF("Beginning full maintenance");
  CheckForErrors();

  if (error_detected_) {
    PW_TRY(Repair());
  }
  StatusWithSize update_status = UpdateEntriesToPrimaryFormat();
  Status overall_status = update_status.status();

  // Make sure all the entries are on the primary format.
  if (!overall_status.ok()) {
    ERR("Failed to update all entries to the primary format");
  }

  SectorDescriptor* sector = sectors_.last_new();

  // Calculate number of bytes for the threshold.
  size_t threshold_bytes =
      (partition_.size_bytes() * kGcUsageThresholdPercentage) / 100;

  // Is bytes in use over the threshold.
  StorageStats stats = GetStorageStats();
  bool over_usage_threshold = stats.in_use_bytes > threshold_bytes;
  bool heavy = (maintenance_type == MaintenanceType::kHeavy);
  // Force GC of all sectors with reclaimable bytes when doing heavy
  // maintenance, when usage crossed the threshold, or when any entries were
  // format-migrated above (migration leaves stale copies behind).
  bool force_gc = heavy || over_usage_threshold || (update_status.size() > 0);

  // TODO: look in to making an iterator method for cycling through sectors
  // starting from last_new_sector_.
  Status gc_status;
  for (size_t j = 0; j < sectors_.size(); j++) {
    // Advance with wrap-around so iteration starts just past the most
    // recently written sector and visits each sector exactly once.
    sector += 1;
    if (sector == sectors_.end()) {
      sector = sectors_.begin();
    }

    if (sector->RecoverableBytes(partition_.sector_size_bytes()) > 0 &&
        (force_gc || sector->valid_bytes() == 0)) {
      gc_status = GarbageCollectSector(*sector, {});
      if (!gc_status.ok()) {
        ERR("Failed to garbage collect all sectors");
        break;
      }
    }
  }
  // Report the GC failure only if the format update did not already fail.
  if (overall_status.ok()) {
    overall_status = gc_status;
  }

  if (overall_status.ok()) {
    INF("Full maintenance complete");
  } else {
    ERR("Full maintenance finished with some errors");
  }
  return overall_status;
}
933 
PartialMaintenance()934 Status KeyValueStore::PartialMaintenance() {
935   if (initialized_ == InitializationState::kNotInitialized) {
936     return Status::FailedPrecondition();
937   }
938 
939   CheckForErrors();
940   // Do automatic repair, if KVS options allow for it.
941   if (error_detected_ && options_.recovery != ErrorRecovery::kManual) {
942     PW_TRY(Repair());
943   }
944   return GarbageCollect(std::span<const Address>());
945 }
946 
GarbageCollect(std::span<const Address> reserved_addresses)947 Status KeyValueStore::GarbageCollect(
948     std::span<const Address> reserved_addresses) {
949   DBG("Garbage Collect a single sector");
950   for ([[maybe_unused]] Address address : reserved_addresses) {
951     DBG("   Avoid address %u", unsigned(address));
952   }
953 
954   // Step 1: Find the sector to garbage collect
955   SectorDescriptor* sector_to_gc =
956       sectors_.FindSectorToGarbageCollect(reserved_addresses);
957 
958   if (sector_to_gc == nullptr) {
959     // Nothing to GC.
960     return Status::NotFound();
961   }
962 
963   // Step 2: Garbage collect the selected sector.
964   return GarbageCollectSector(*sector_to_gc, reserved_addresses);
965 }
966 
RelocateKeyAddressesInSector(SectorDescriptor & sector_to_gc,const EntryMetadata & metadata,std::span<const Address> reserved_addresses)967 Status KeyValueStore::RelocateKeyAddressesInSector(
968     SectorDescriptor& sector_to_gc,
969     const EntryMetadata& metadata,
970     std::span<const Address> reserved_addresses) {
971   for (FlashPartition::Address& address : metadata.addresses()) {
972     if (sectors_.AddressInSector(sector_to_gc, address)) {
973       DBG("  Relocate entry for Key 0x%08" PRIx32 ", sector %u",
974           metadata.hash(),
975           sectors_.Index(sectors_.FromAddress(address)));
976       PW_TRY(RelocateEntry(metadata, address, reserved_addresses));
977     }
978   }
979 
980   return OkStatus();
981 };
982 
// Reclaims `sector_to_gc`: relocates all valid entries out of it, then erases
// it so its full capacity becomes writable again.
Status KeyValueStore::GarbageCollectSector(
    SectorDescriptor& sector_to_gc,
    std::span<const Address> reserved_addresses) {
  DBG("  Garbage Collect sector %u", sectors_.Index(sector_to_gc));

  // Step 1: Move any valid entries in the GC sector to other sectors
  if (sector_to_gc.valid_bytes() != 0) {
    for (EntryMetadata& metadata : entry_cache_) {
      PW_TRY(RelocateKeyAddressesInSector(
          sector_to_gc, metadata, reserved_addresses));
    }
  }

  if (sector_to_gc.valid_bytes() != 0) {
    // Relocation should have drained the sector; leftover valid bytes mean
    // the sector accounting and the entry cache disagree.
    ERR("  Failed to relocate valid entries from sector being garbage "
        "collected, %u valid bytes remain",
        unsigned(sector_to_gc.valid_bytes()));
    return Status::Internal();
  }

  // Step 2: Reinitialize the sector
  if (!sector_to_gc.Empty(partition_.sector_size_bytes())) {
    // Mark the sector corrupt for the duration of the erase so a failed or
    // interrupted erase leaves it flagged for repair rather than usable.
    sector_to_gc.mark_corrupt();
    internal_stats_.sector_erase_count++;
    PW_TRY(partition_.Erase(sectors_.BaseAddress(sector_to_gc), 1));
    sector_to_gc.set_writable_bytes(partition_.sector_size_bytes());
  }

  DBG("  Garbage Collect sector %u complete", sectors_.Index(sector_to_gc));
  return OkStatus();
}
1014 
// Rewrites every cached entry that is not already in the primary format using
// the primary format, including all redundant copies. Returns the number of
// entries updated, or the first error encountered. Each rewrite consumes a
// new transaction ID, which invalidates the old-format copies.
StatusWithSize KeyValueStore::UpdateEntriesToPrimaryFormat() {
  size_t entries_updated = 0;
  for (EntryMetadata& prior_metadata : entry_cache_) {
    Entry entry;
    PW_TRY_WITH_SIZE(ReadEntry(prior_metadata, entry));
    if (formats_.primary().magic == entry.magic()) {
      // Ignore entries that are already on the primary format.
      continue;
    }

    DBG("Updating entry 0x%08x from old format [0x%08x] to new format "
        "[0x%08x]",
        unsigned(prior_metadata.hash()),
        unsigned(entry.magic()),
        unsigned(formats_.primary().magic));

    entries_updated++;

    // Burn a transaction ID so the rewritten entry supersedes the original.
    last_transaction_id_ += 1;
    PW_TRY_WITH_SIZE(entry.Update(formats_.primary(), last_transaction_id_));

    // List of addresses for sectors with space for this entry.
    Address* reserved_addresses = entry_cache_.TempReservedAddressesForWrite();

    // Find addresses to write the entry to. This may involve garbage collecting
    // one or more sectors.
    PW_TRY_WITH_SIZE(GetAddressesForWrite(reserved_addresses, entry.size()));

    PW_TRY_WITH_SIZE(
        CopyEntryToSector(entry,
                          &sectors_.FromAddress(reserved_addresses[0]),
                          reserved_addresses[0]));

    // After writing the first entry successfully, update the key descriptors.
    // Once a single new entry is written, the old entries are invalidated.
    EntryMetadata new_metadata = UpdateKeyDescriptor(
        entry, reserved_addresses[0], &prior_metadata, entry.size());

    // Write the additional copies of the entry, if redundancy is greater
    // than 1.
    for (size_t i = 1; i < redundancy(); ++i) {
      PW_TRY_WITH_SIZE(
          CopyEntryToSector(entry,
                            &sectors_.FromAddress(reserved_addresses[i]),
                            reserved_addresses[i]));
      new_metadata.AddNewAddress(reserved_addresses[i]);
    }
  }

  return StatusWithSize(entries_updated);
}
1066 
1067 // Add any missing redundant entries/copies for a key.
AddRedundantEntries(EntryMetadata & metadata)1068 Status KeyValueStore::AddRedundantEntries(EntryMetadata& metadata) {
1069   Entry entry;
1070   PW_TRY(ReadEntry(metadata, entry));
1071   PW_TRY(entry.VerifyChecksumInFlash());
1072 
1073   while (metadata.addresses().size() < redundancy()) {
1074     SectorDescriptor* new_sector;
1075     PW_TRY(GetSectorForWrite(&new_sector, entry.size(), metadata.addresses()));
1076 
1077     Address new_address = sectors_.NextWritableAddress(*new_sector);
1078     PW_TRY(CopyEntryToSector(entry, new_sector, new_address));
1079 
1080     metadata.AddNewAddress(new_address);
1081   }
1082   return OkStatus();
1083 }
1084 
// Garbage collects every sector marked corrupt, in up to two passes. Returns
// OK if all corrupt sectors were recovered, otherwise the last failure.
Status KeyValueStore::RepairCorruptSectors() {
  // Try to GC each corrupt sector, even if previous sectors fail. If GC of a
  // sector failed on the first pass, then do a second pass, since a later
  // sector might have cleared up space or otherwise unblocked the earlier
  // failed sector.
  Status repair_status = OkStatus();

  size_t loop_count = 0;
  do {
    loop_count++;
    // Error of RESOURCE_EXHAUSTED indicates no space found for relocation.
    // Reset back to OK for the next pass.
    if (repair_status.IsResourceExhausted()) {
      repair_status = OkStatus();
    }

    DBG("   Pass %u", unsigned(loop_count));
    for (SectorDescriptor& sector : sectors_) {
      if (sector.corrupt()) {
        DBG("   Found sector %u with corruption", sectors_.Index(sector));
        Status sector_status = GarbageCollectSector(sector, {});
        if (sector_status.ok()) {
          internal_stats_.corrupt_sectors_recovered += 1;
        } else if (repair_status.ok() || repair_status.IsResourceExhausted()) {
          // Remember the first failure, but allow RESOURCE_EXHAUSTED to be
          // replaced by a more specific later error.
          repair_status = sector_status;
        }
      }
    }
    DBG("   Pass %u complete", unsigned(loop_count));
  } while (!repair_status.ok() && loop_count < 2);

  return repair_status;
}
1118 
EnsureFreeSectorExists()1119 Status KeyValueStore::EnsureFreeSectorExists() {
1120   Status repair_status = OkStatus();
1121   bool empty_sector_found = false;
1122 
1123   DBG("   Find empty sector");
1124   for (SectorDescriptor& sector : sectors_) {
1125     if (sector.Empty(partition_.sector_size_bytes())) {
1126       empty_sector_found = true;
1127       DBG("   Empty sector found");
1128       break;
1129     }
1130   }
1131   if (empty_sector_found == false) {
1132     DBG("   No empty sector found, attempting to GC a free sector");
1133     Status sector_status = GarbageCollect(std::span<const Address, 0>());
1134     if (repair_status.ok() && !sector_status.ok()) {
1135       DBG("   Unable to free an empty sector");
1136       repair_status = sector_status;
1137     }
1138   }
1139 
1140   return repair_status;
1141 }
1142 
EnsureEntryRedundancy()1143 Status KeyValueStore::EnsureEntryRedundancy() {
1144   Status repair_status = OkStatus();
1145 
1146   if (redundancy() == 1) {
1147     DBG("   Redundancy not in use, nothting to check");
1148     return OkStatus();
1149   }
1150 
1151   DBG("   Write any needed additional duplicate copies of keys to fulfill %u"
1152       " redundancy",
1153       unsigned(redundancy()));
1154   for (EntryMetadata& metadata : entry_cache_) {
1155     if (metadata.addresses().size() >= redundancy()) {
1156       continue;
1157     }
1158 
1159     DBG("   Key with %u of %u copies found, adding missing copies",
1160         unsigned(metadata.addresses().size()),
1161         unsigned(redundancy()));
1162     Status fill_status = AddRedundantEntries(metadata);
1163     if (fill_status.ok()) {
1164       internal_stats_.missing_redundant_entries_recovered += 1;
1165       DBG("   Key missing copies added");
1166     } else {
1167       DBG("   Failed to add key missing copies");
1168       if (repair_status.ok()) {
1169         repair_status = fill_status;
1170       }
1171     }
1172   }
1173 
1174   return repair_status;
1175 }
1176 
// Attempts to fix all detected errors in three steps. On success, clears the
// error flag and marks the KVS ready for use.
Status KeyValueStore::FixErrors() {
  DBG("Fixing KVS errors");

  // Step 1: Garbage collect any sectors marked as corrupt.
  Status overall_status = RepairCorruptSectors();

  // Step 2: Make sure there is at least 1 empty sector. This needs to be a
  // separate check of sectors from step 1, because a found empty sector might
  // get written to by a later GC that fails and does not result in a free
  // sector.
  Status repair_status = EnsureFreeSectorExists();
  if (overall_status.ok()) {
    overall_status = repair_status;
  }

  // Step 3: Make sure each stored key has the full number of redundant
  // entries.
  repair_status = EnsureEntryRedundancy();
  if (overall_status.ok()) {
    overall_status = repair_status;
  }

  if (overall_status.ok()) {
    // All repairs succeeded; the KVS is consistent and safe to use again.
    error_detected_ = false;
    initialized_ = InitializationState::kReady;
  }
  return overall_status;
}
1205 
Repair()1206 Status KeyValueStore::Repair() {
1207   // If errors have been detected, just reinit the KVS metadata. This does a
1208   // full deep error check and any needed repairs. Then repair any errors.
1209   INF("Starting KVS repair");
1210 
1211   DBG("Reinitialize KVS metadata");
1212   InitializeMetadata()
1213       .IgnoreError();  // TODO(pwbug/387): Handle Status properly
1214 
1215   return FixErrors();
1216 }
1217 
CreateEntry(Address address,Key key,std::span<const byte> value,EntryState state)1218 KeyValueStore::Entry KeyValueStore::CreateEntry(Address address,
1219                                                 Key key,
1220                                                 std::span<const byte> value,
1221                                                 EntryState state) {
1222   // Always bump the transaction ID when creating a new entry.
1223   //
1224   // Burning transaction IDs prevents inconsistencies between flash and memory
1225   // that which could happen if a write succeeds, but for some reason the read
1226   // and verify step fails. Here's how this would happen:
1227   //
1228   //   1. The entry is written but for some reason the flash reports failure OR
1229   //      The write succeeds, but the read / verify operation fails.
1230   //   2. The transaction ID is NOT incremented, because of the failure
1231   //   3. (later) A new entry is written, re-using the transaction ID (oops)
1232   //
1233   // By always burning transaction IDs, the above problem can't happen.
1234   last_transaction_id_ += 1;
1235 
1236   if (state == EntryState::kDeleted) {
1237     return Entry::Tombstone(
1238         partition_, address, formats_.primary(), key, last_transaction_id_);
1239   }
1240   return Entry::Valid(partition_,
1241                       address,
1242                       formats_.primary(),
1243                       key,
1244                       value,
1245                       last_transaction_id_);
1246 }
1247 
// Dumps a full diagnostic view of the KVS at DBG level: partition geometry,
// key descriptors, sector descriptors, and a (truncated) raw hex dump of each
// sector. Debug-only; see the TODOs below about the fixed-size read buffer
// and the dump-length limit.
void KeyValueStore::LogDebugInfo() const {
  const size_t sector_size_bytes = partition_.sector_size_bytes();
  DBG("====================== KEY VALUE STORE DUMP =========================");
  DBG(" ");
  DBG("Flash partition:");
  DBG("  Sector count     = %u", unsigned(partition_.sector_count()));
  DBG("  Sector max count = %u", unsigned(sectors_.max_size()));
  DBG("  Sectors in use   = %u", unsigned(sectors_.size()));
  DBG("  Sector size      = %u", unsigned(sector_size_bytes));
  DBG("  Total size       = %u", unsigned(partition_.size_bytes()));
  DBG("  Alignment        = %u", unsigned(partition_.alignment_bytes()));
  DBG(" ");
  DBG("Key descriptors:");
  DBG("  Entry count     = %u", unsigned(entry_cache_.total_entries()));
  DBG("  Max entry count = %u", unsigned(entry_cache_.max_entries()));
  DBG(" ");
  DBG("      #     hash        version    address   address (hex)");
  size_t count = 0;
  for (const EntryMetadata& metadata : entry_cache_) {
    // first_address is printed twice: once in decimal, once in hex.
    DBG("   |%3zu: | %8zx  |%8zu  | %8zu | %8zx",
        count++,
        size_t(metadata.hash()),
        size_t(metadata.transaction_id()),
        size_t(metadata.first_address()),
        size_t(metadata.first_address()));
  }
  DBG(" ");

  DBG("Sector descriptors:");
  DBG("      #     tail free  valid    has_space");
  for (const SectorDescriptor& sd : sectors_) {
    DBG("   |%3u: | %8zu  |%8zu  | %s",
        sectors_.Index(sd),
        size_t(sd.writable_bytes()),
        sd.valid_bytes(),
        sd.writable_bytes() ? "YES" : "");
  }
  DBG(" ");

  // TODO: This should stop logging after some threshold.
  // size_t dumped_bytes = 0;
  DBG("Sector raw data:");
  for (size_t sector_id = 0; sector_id < sectors_.size(); ++sector_id) {
    // Read sector data. Yes, this will blow the stack on embedded.
    std::array<byte, 500> raw_sector_data;  // TODO!!!
    [[maybe_unused]] StatusWithSize sws =
        partition_.Read(sector_id * sector_size_bytes, raw_sector_data);
    DBG("Read: %u bytes", unsigned(sws.size()));

    DBG("  base    addr  offs   0  1  2  3  4  5  6  7");
    // Dump eight bytes per line.
    for (size_t i = 0; i < sector_size_bytes; i += 8) {
      DBG("  %3zu %8zx %5zu | %02x %02x %02x %02x %02x %02x %02x %02x",
          sector_id,
          (sector_id * sector_size_bytes) + i,
          i,
          static_cast<unsigned int>(raw_sector_data[i + 0]),
          static_cast<unsigned int>(raw_sector_data[i + 1]),
          static_cast<unsigned int>(raw_sector_data[i + 2]),
          static_cast<unsigned int>(raw_sector_data[i + 3]),
          static_cast<unsigned int>(raw_sector_data[i + 4]),
          static_cast<unsigned int>(raw_sector_data[i + 5]),
          static_cast<unsigned int>(raw_sector_data[i + 6]),
          static_cast<unsigned int>(raw_sector_data[i + 7]),
          static_cast<unsigned int>(raw_sector_data[i + 7]));

      // TODO: Fix exit condition.
      if (i > 128) {
        break;
      }
    }
    DBG(" ");
  }

  DBG("////////////////////// KEY VALUE STORE DUMP END /////////////////////");
}
1322 
LogSectors() const1323 void KeyValueStore::LogSectors() const {
1324   DBG("Sector descriptors: count %u", unsigned(sectors_.size()));
1325   for (auto& sector : sectors_) {
1326     DBG("  - Sector %u: valid %u, recoverable %u, free %u",
1327         sectors_.Index(sector),
1328         unsigned(sector.valid_bytes()),
1329         unsigned(sector.RecoverableBytes(partition_.sector_size_bytes())),
1330         unsigned(sector.writable_bytes()));
1331   }
1332 }
1333 
LogKeyDescriptor() const1334 void KeyValueStore::LogKeyDescriptor() const {
1335   DBG("Key descriptors: count %u", unsigned(entry_cache_.total_entries()));
1336   for (const EntryMetadata& metadata : entry_cache_) {
1337     DBG("  - Key: %s, hash %#x, transaction ID %u, first address %#x",
1338         metadata.state() == EntryState::kDeleted ? "Deleted" : "Valid",
1339         unsigned(metadata.hash()),
1340         unsigned(metadata.transaction_id()),
1341         unsigned(metadata.first_address()));
1342   }
1343 }
1344 
1345 }  // namespace pw::kvs
1346