1 //
2 // Copyright (C) 2012 The Android Open Source Project
3 //
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
7 //
8 //      http://www.apache.org/licenses/LICENSE-2.0
9 //
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
15 //
16 
17 #include "update_engine/payload_consumer/delta_performer.h"
18 
19 #include <linux/fs.h>
20 
21 #include <algorithm>
22 #include <chrono>
23 #include <cstring>
24 #include <memory>
25 #include <set>
26 #include <string>
27 #include <utility>
28 #include <vector>
29 
30 #include <android-base/properties.h>
31 #include <android-base/strings.h>
32 #include <base/files/file_util.h>
33 #include <base/format_macros.h>
34 #include <base/metrics/histogram_macros.h>
35 #include <base/strings/string_number_conversions.h>
36 #include <android-base/stringprintf.h>
37 #include <base/time/time.h>
38 #include <brillo/data_encoding.h>
39 #include <bsdiff/bspatch.h>
40 #include <google/protobuf/repeated_field.h>
41 #include <puffin/puffpatch.h>
42 
43 #include "libsnapshot/cow_format.h"
44 #include "update_engine/common/constants.h"
45 #include "update_engine/common/download_action.h"
46 #include "update_engine/common/error_code.h"
47 #include "update_engine/common/error_code_utils.h"
48 #include "update_engine/common/hardware_interface.h"
49 #include "update_engine/common/prefs_interface.h"
50 #include "update_engine/common/terminator.h"
51 #include "update_engine/common/utils.h"
52 #include "update_engine/payload_consumer/partition_update_generator_interface.h"
53 #include "update_engine/payload_consumer/partition_writer.h"
54 #include "update_engine/update_metadata.pb.h"
55 #if USE_FEC
56 #include "update_engine/payload_consumer/fec_file_descriptor.h"
57 #endif  // USE_FEC
58 #include "update_engine/payload_consumer/payload_constants.h"
59 #include "update_engine/payload_consumer/payload_verifier.h"
60 
61 using google::protobuf::RepeatedPtrField;
62 using std::min;
63 using std::string;
64 using std::vector;
65 
66 namespace chromeos_update_engine {
67 const unsigned DeltaPerformer::kProgressLogMaxChunks = 10;
68 const unsigned DeltaPerformer::kProgressLogTimeoutSeconds = 30;
69 const unsigned DeltaPerformer::kProgressDownloadWeight = 50;
70 const unsigned DeltaPerformer::kProgressOperationsWeight = 50;
71 const uint64_t DeltaPerformer::kCheckpointFrequencySeconds = 1;
72 
73 namespace {
74 const int kUpdateStateOperationInvalid = -1;
75 const int kMaxResumedUpdateFailures = 10;
76 
77 }  // namespace
78 
79 // Computes the ratio of |part| and |total|, scaled to |norm|, using integer
80 // arithmetic.
81 static uint64_t IntRatio(uint64_t part, uint64_t total, uint64_t norm) {
82   return part * norm / total;
83 }
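// Illustration of the integer arithmetic above: IntRatio(30, 120, 100) == 25,
// i.e. 30 of 120 completed operations maps to 25%. The multiplication happens
// before the division, so only the fractional part is lost to truncation.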
84 
85 void DeltaPerformer::LogProgress(const char* message_prefix) {
86   // Format operations total count and percentage.
87   string total_operations_str("?");
88   string completed_percentage_str("");
89   if (num_total_operations_) {
90     total_operations_str = std::to_string(num_total_operations_);
91     // Upcast to 64-bit to avoid overflow in the ratio computation.
92     completed_percentage_str = android::base::StringPrintf(
93         " (%" PRIu64 "%%)",
94         IntRatio(next_operation_num_, num_total_operations_, 100));
95   }
96 
97   // Format download total count and percentage.
98   size_t payload_size = payload_->size;
99   string payload_size_str("?");
100   string downloaded_percentage_str("");
101   if (payload_size) {
102     payload_size_str = std::to_string(payload_size);
103     // Upcast to 64-bit to avoid overflow in the ratio computation.
104     downloaded_percentage_str = android::base::StringPrintf(
105         " (%" PRIu64 "%%)", IntRatio(total_bytes_received_, payload_size, 100));
106   }
107 
108   LOG(INFO) << (message_prefix ? message_prefix : "") << next_operation_num_
109             << "/" << total_operations_str << " operations"
110             << completed_percentage_str << ", " << total_bytes_received_ << "/"
111             << payload_size_str << " bytes downloaded"
112             << downloaded_percentage_str << ", overall progress "
113             << overall_progress_ << "%";
114 }
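// A typical line produced by LogProgress() with the "Completed " prefix:
//   "Completed 12/340 operations (3%), 1048576/52428800 bytes downloaded (2%),
//    overall progress 2%"
// The "?" placeholder is used for either total while it is not yet known.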
115 
116 void DeltaPerformer::UpdateOverallProgress(bool force_log,
117                                            const char* message_prefix) {
118   // Compute our download and overall progress.
119   unsigned new_overall_progress = 0;
120   static_assert(kProgressDownloadWeight + kProgressOperationsWeight == 100,
121                 "Progress weights don't add up");
122   // Only consider download progress if its total size is known; otherwise
123   // adjust the operations weight to compensate for the absence of download
124   // progress. Also, make sure to cap the download portion at
125   // kProgressDownloadWeight, in case we end up downloading more than we
126   // initially expected (this indicates a problem, but could generally happen).
127   // TODO(garnold) the correction of operations weight when we do not have the
128   // total payload size, as well as the conditional guard below, should both be
129   // eliminated once we ensure that the payload_size in the install plan is
130   // always given and is non-zero. This currently isn't the case during unit
131   // tests (see chromium-os:37969).
132   size_t payload_size = payload_->size;
133   unsigned actual_operations_weight = kProgressOperationsWeight;
134   if (payload_size)
135     new_overall_progress +=
136         min(static_cast<unsigned>(IntRatio(
137                 total_bytes_received_, payload_size, kProgressDownloadWeight)),
138             kProgressDownloadWeight);
139   else
140     actual_operations_weight += kProgressDownloadWeight;
141 
142   // Only add completed operations if their total number is known; we definitely
143   // expect an update to have at least one operation, so the expectation is that
144   // this will eventually reach |actual_operations_weight|.
145   if (num_total_operations_)
146     new_overall_progress += IntRatio(
147         next_operation_num_, num_total_operations_, actual_operations_weight);
148 
149   // Progress ratio cannot recede, unless our assumptions about the total
150   // payload size, total number of operations, or the monotonicity of progress
151   // are breached.
152   if (new_overall_progress < overall_progress_) {
153     LOG(WARNING) << "progress counter receded from " << overall_progress_
154                  << "% down to " << new_overall_progress << "%; this is a bug";
155     force_log = true;
156   }
157   overall_progress_ = new_overall_progress;
158 
159   // Update chunk index, log as needed: if forced by the caller, or we
160   // completed a progress chunk, or a timeout has expired.
161   base::TimeTicks curr_time = base::TimeTicks::Now();
162   unsigned curr_progress_chunk =
163       overall_progress_ * kProgressLogMaxChunks / 100;
164   if (force_log || curr_progress_chunk > last_progress_chunk_ ||
165       curr_time > forced_progress_log_time_) {
166     forced_progress_log_time_ = curr_time + forced_progress_log_wait_;
167     LogProgress(message_prefix);
168   }
169   last_progress_chunk_ = curr_progress_chunk;
170 }
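// Worked example of the weighting above: with the payload size known, 40% of
// the bytes downloaded and 20% of the operations applied give
//   40 * 50 / 100 + 20 * 50 / 100 = 20 + 10 = 30% overall progress.
// When the payload size is unknown, the operations term alone carries the
// full 100-point weight.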
171 
172 size_t DeltaPerformer::CopyDataToBuffer(const char** bytes_p,
173                                         size_t* count_p,
174                                         size_t max) {
175   const size_t count = *count_p;
176   if (!count)
177     return 0;  // Special case shortcut.
178   size_t read_len = min(count, max - buffer_.size());
179   const char* bytes_start = *bytes_p;
180   const char* bytes_end = bytes_start + read_len;
181   buffer_.reserve(max);
182   buffer_.insert(buffer_.end(), bytes_start, bytes_end);
183   *bytes_p = bytes_end;
184   *count_p = count - read_len;
185   return read_len;
186 }
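// Example of the buffering behavior above: with an empty |buffer_| and
// max == 16, a call with *count_p == 100 appends 16 bytes to |buffer_|,
// advances *bytes_p by 16, leaves *count_p == 84, and returns 16.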
187 
188 bool DeltaPerformer::HandleOpResult(bool op_result,
189                                     const char* op_type_name,
190                                     ErrorCode* error) {
191   if (op_result)
192     return true;
193 
194   LOG(ERROR) << "Failed to perform " << op_type_name << " operation "
195              << next_operation_num_ << ", which is the operation "
196              << GetPartitionOperationNum() << " in partition \""
197              << partitions_[current_partition_].partition_name() << "\"";
198   if (*error == ErrorCode::kSuccess)
199     *error = ErrorCode::kDownloadOperationExecutionError;
200   return false;
201 }
202 
203 int DeltaPerformer::Close() {
204   // Checkpoint update progress before canceling, so that subsequent attempts
205   // can resume from exactly where update_engine left off last time.
206   CheckpointUpdateProgress(true);
207   int err = -CloseCurrentPartition();
208   LOG_IF(ERROR,
209          !payload_hash_calculator_.Finalize() ||
210              !signed_hash_calculator_.Finalize())
211       << "Unable to finalize the hash.";
212   if (!buffer_.empty()) {
213     LOG(INFO) << "Discarding " << buffer_.size() << " unused downloaded bytes";
214     if (err >= 0)
215       err = 1;
216   }
217   return -err;
218 }
219 
220 int DeltaPerformer::CloseCurrentPartition() {
221   if (!partition_writer_) {
222     return 0;
223   }
224   int err = partition_writer_->Close();
225   partition_writer_ = nullptr;
226   return err;
227 }
228 
229 bool DeltaPerformer::OpenCurrentPartition() {
230   if (current_partition_ >= partitions_.size())
231     return false;
232 
233   const PartitionUpdate& partition = partitions_[current_partition_];
234   size_t num_previous_partitions =
235       install_plan_->partitions.size() - partitions_.size();
236   const InstallPlan::Partition& install_part =
237       install_plan_->partitions[num_previous_partitions + current_partition_];
238   auto dynamic_control = boot_control_->GetDynamicPartitionControl();
239   partition_writer_ = CreatePartitionWriter(
240       partition,
241       install_part,
242       dynamic_control,
243       block_size_,
244       interactive_,
245       IsDynamicPartition(install_part.name, install_plan_->target_slot));
246   // Open source fds if we have a delta payload, or for partitions in the
247   // partial update.
248   const bool source_may_exist = manifest_.partial_update() ||
249                                 payload_->type == InstallPayloadType::kDelta;
250   const size_t partition_operation_num = GetPartitionOperationNum();
251 
252   TEST_AND_RETURN_FALSE(partition_writer_->Init(
253       install_plan_, source_may_exist, partition_operation_num));
254   CheckpointUpdateProgress(true);
255   return true;
256 }
257 
258 size_t DeltaPerformer::GetPartitionOperationNum() {
259   return next_operation_num_ -
260          (current_partition_ ? acc_num_operations_[current_partition_ - 1] : 0);
261 }
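// Example of the accumulator arithmetic above: if acc_num_operations_ is
// {10, 25} and next_operation_num_ is 12 while current_partition_ is 1, this
// returns 12 - 10 == 2, i.e. the third operation of the second partition.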
262 
263 namespace {
264 
265 void LogPartitionInfoHash(const PartitionInfo& info, const string& tag) {
266   string sha256 = HexEncode(info.hash());
267   LOG(INFO) << "PartitionInfo " << tag << " sha256: " << sha256
268             << " size: " << info.size();
269 }
270 
271 void LogPartitionInfo(const vector<PartitionUpdate>& partitions) {
272   for (const PartitionUpdate& partition : partitions) {
273     if (partition.has_old_partition_info()) {
274       LogPartitionInfoHash(partition.old_partition_info(),
275                            "old " + partition.partition_name());
276     }
277     LogPartitionInfoHash(partition.new_partition_info(),
278                          "new " + partition.partition_name());
279   }
280 }
281 
282 }  // namespace
283 
284 bool DeltaPerformer::IsHeaderParsed() const {
285   return metadata_size_ != 0;
286 }
287 
288 MetadataParseResult DeltaPerformer::ParsePayloadMetadata(
289     const brillo::Blob& payload, ErrorCode* error) {
290   *error = ErrorCode::kSuccess;
291 
292   if (!IsHeaderParsed()) {
293     MetadataParseResult result =
294         payload_metadata_.ParsePayloadHeader(payload, error);
295     if (result != MetadataParseResult::kSuccess)
296       return result;
297 
298     metadata_size_ = payload_metadata_.GetMetadataSize();
299     metadata_signature_size_ = payload_metadata_.GetMetadataSignatureSize();
300     major_payload_version_ = payload_metadata_.GetMajorVersion();
301 
302     // If the metadata size is present in the install plan, check it right
303     // away, even before waiting for that many bytes of the payload to be
304     // downloaded. This prevents any attack which relies on us downloading
305     // data beyond the expected metadata size.
306     if (install_plan_->hash_checks_mandatory) {
307       if (payload_->metadata_size != metadata_size_) {
308         LOG(ERROR) << "Mandatory metadata size in Omaha response ("
309                    << payload_->metadata_size
310                    << ") is missing/incorrect, actual = " << metadata_size_;
311         *error = ErrorCode::kDownloadInvalidMetadataSize;
312         return MetadataParseResult::kError;
313       }
314     }
315 
316     // Check that |metadata_signature_size_| and |metadata_size_| are not
317     // very big numbers. This is necessary since |update_engine| needs to write
318     // these values into the buffer before being able to use them, and if an
319     // attacker sets these values to a very big number, the buffer will overflow
320     // and |update_engine| will crash. A simple way of solving this is to check
321     // that the size of both values is smaller than the payload itself.
322     if (metadata_size_ + metadata_signature_size_ > payload_->size) {
323       LOG(ERROR) << "The size of the metadata_size(" << metadata_size_ << ")"
324                  << " or metadata signature(" << metadata_signature_size_ << ")"
325                  << " is greater than the size of the payload" << "("
326                  << payload_->size << ")";
327       *error = ErrorCode::kDownloadInvalidMetadataSize;
328       return MetadataParseResult::kError;
329     }
330   }
331 
332   // Now that we have validated the metadata size, we should wait for the full
333   // metadata and its signature (if it exists) to be read in before we can parse it.
334   if (payload.size() < metadata_size_ + metadata_signature_size_)
335     return MetadataParseResult::kInsufficientData;
336 
337   // Log whether we validated the size or are simply trusting what's in the
338   // payload. This is logged here (after we have received the full metadata) so
339   // that we just log once (instead of logging n times) if it takes n
340   // DeltaPerformer::Write calls to download the full manifest.
341   if (payload_->metadata_size == metadata_size_) {
342     LOG(INFO) << "Manifest size in payload matches expected value from Omaha";
343   } else {
344     // For mandatory cases, we'd have already returned a kMetadataParseError
345     // above. We'll be here only for non-mandatory cases. Just send a UMA stat.
346     LOG(WARNING) << "Ignoring missing/incorrect metadata size ("
347                  << payload_->metadata_size
348                  << ") in Omaha response as validation is not mandatory. "
349                  << "Trusting metadata size in payload = " << metadata_size_;
350   }
351 
352   // NOLINTNEXTLINE(whitespace/braces)
353   auto [payload_verifier, perform_verification] = CreatePayloadVerifier();
354   if (!payload_verifier) {
355     LOG(ERROR) << "Failed to create payload verifier.";
356     *error = ErrorCode::kDownloadMetadataSignatureVerificationError;
357     if (perform_verification) {
358       return MetadataParseResult::kError;
359     }
360   } else {
361     // We have the full metadata in |payload|. Verify its integrity
362     // and authenticity based on the information we have in Omaha response.
363     *error = payload_metadata_.ValidateMetadataSignature(
364         payload, payload_->metadata_signature, *payload_verifier);
365   }
366   if (*error != ErrorCode::kSuccess) {
367     if (install_plan_->hash_checks_mandatory) {
368       // The autoupdate_CatchBadSignatures test checks for this string
369       // in log-files. Keep in sync.
370       LOG(ERROR) << "Mandatory metadata signature validation failed";
371       return MetadataParseResult::kError;
372     }
373 
374     // For non-mandatory cases, just send a UMA stat.
375     LOG(WARNING) << "Ignoring metadata signature validation failures";
376     *error = ErrorCode::kSuccess;
377   }
378 
379   // The payload metadata is deemed valid, it's safe to parse the protobuf.
380   if (!payload_metadata_.GetManifest(payload, &manifest_)) {
381     LOG(ERROR) << "Unable to parse manifest in update file.";
382     *error = ErrorCode::kDownloadManifestParseError;
383     return MetadataParseResult::kError;
384   }
385 
386   manifest_parsed_ = true;
387   return MetadataParseResult::kSuccess;
388 }
389 
390 #define OP_DURATION_HISTOGRAM(_op_name, _start_time)                        \
391   LOCAL_HISTOGRAM_CUSTOM_TIMES(                                             \
392       "UpdateEngine.DownloadAction.InstallOperation::" + string(_op_name) + \
393           ".Duration",                                                      \
394       (base::TimeTicks::Now() - _start_time),                               \
395       base::TimeDelta::FromMilliseconds(10),                                \
396       base::TimeDelta::FromMinutes(5),                                      \
397       20);
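// For instance, OP_DURATION_HISTOGRAM("REPLACE", op_start_time) records the
// elapsed time under the local histogram named
// "UpdateEngine.DownloadAction.InstallOperation::REPLACE.Duration".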
398 
399 bool DeltaPerformer::CheckSPLDowngrade() {
400   if (!manifest_.has_security_patch_level()) {
401     return true;
402   }
403   if (manifest_.security_patch_level().empty()) {
404     return true;
405   }
406   const auto new_spl = manifest_.security_patch_level();
407   const auto current_spl =
408       android::base::GetProperty("ro.build.version.security_patch", "");
409   if (current_spl.empty()) {
410     LOG(WARNING) << "Failed to get ro.build.version.security_patch, unable to "
411                     "determine if this OTA is an SPL downgrade. Assuming this "
412                     "OTA is not an SPL downgrade.";
413     return true;
414   }
415   if (new_spl < current_spl) {
416     const auto avb_state =
417         android::base::GetProperty("ro.boot.verifiedbootstate", "green");
418     if (android::base::EqualsIgnoreCase(avb_state, "green")) {
419       LOG(ERROR) << "Target build SPL " << new_spl
420                  << " is older than current build's SPL " << current_spl
421                  << ", this OTA is an SPL downgrade. Your device's "
422                     "ro.boot.verifiedbootstate="
423                  << avb_state
424                  << ", it probably has a locked bootloader. Since a locked "
425                     "bootloader will reject SPL downgrade no matter what, we "
426                     "will reject this OTA.";
427       return false;
428     }
429     install_plan_->powerwash_required = true;
430     LOG(WARNING)
431         << "Target build SPL " << new_spl
432         << " is older than current build's SPL " << current_spl
433         << ", this OTA is an SPL downgrade. Data wipe will be required";
434   }
435   return true;
436 }
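// Note on the comparison above: security patch levels are date strings of the
// form "YYYY-MM-DD" (e.g. "2023-05-05"), so the lexicographic
// new_spl < current_spl test matches chronological order.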
437 
438 // Wrapper around write. Returns true if all requested bytes were written,
439 // or false on any error regardless of progress, and stores an action exit
440 // code in |error|.
441 bool DeltaPerformer::Write(const void* bytes, size_t count, ErrorCode* error) {
442   if (!error) {
443     LOG(INFO) << "Error Code is not initialized";
444     return false;
445   }
446   *error = ErrorCode::kSuccess;
447   const char* c_bytes = reinterpret_cast<const char*>(bytes);
448 
449   // Update the total byte downloaded count and the progress logs.
450   total_bytes_received_ += count;
451   UpdateOverallProgress(false, "Completed ");
452 
453   while (!manifest_valid_) {
454     bool insufficient_bytes = false;
455     if (!ParseManifest(&c_bytes, &count, error, &insufficient_bytes)) {
456       LOG(ERROR) << "Failed to parse manifest";
457       return false;
458     }
459     if (insufficient_bytes) {
460       return true;
461     }
462   }
463 
464   while (next_operation_num_ < num_total_operations_) {
465     // Check if we should cancel the current attempt for any reason.
466     // In this case, *error will have already been populated with the reason
467     // why we're canceling.
468     if (download_delegate_ && download_delegate_->ShouldCancel(error))
469       return false;
470 
471     // We know there are more operations to perform because we didn't reach the
472     // |num_total_operations_| limit yet.
473     if (next_operation_num_ >= acc_num_operations_[current_partition_]) {
474       if (partition_writer_) {
475         if (!partition_writer_->FinishedInstallOps()) {
476           *error = ErrorCode::kDownloadWriteError;
477           return false;
478         }
479       }
480       const auto err = CloseCurrentPartition();
481       if (err < 0) {
482         LOG(ERROR) << "Failed to close partition "
483                    << partitions_[current_partition_].partition_name() << " "
484                    << strerror(-err);
485         return false;
486       }
487       // Skip until there are operations for current_partition_.
488       while (next_operation_num_ >= acc_num_operations_[current_partition_]) {
489         current_partition_++;
490       }
491       if (!OpenCurrentPartition()) {
492         *error = ErrorCode::kInstallDeviceOpenError;
493         return false;
494       }
495     }
496 
497     const InstallOperation& op =
498         partitions_[current_partition_].operations(GetPartitionOperationNum());
499 
500     CopyDataToBuffer(&c_bytes, &count, op.data_length());
501 
502     // Check whether we received all of the next operation's data payload.
503     if (!CanPerformInstallOperation(op))
504       return true;
505     if (!ProcessOperation(&op, error)) {
506       LOG(ERROR) << "unable to process operation: "
507                  << InstallOperationTypeName(op.type())
508                  << " Error: " << utils::ErrorCodeToString(*error);
509       return false;
510     }
511 
512     next_operation_num_++;
513     UpdateOverallProgress(false, "Completed ");
514     CheckpointUpdateProgress(false);
515   }
516 
517   if (partition_writer_) {
518     TEST_AND_RETURN_FALSE(partition_writer_->FinishedInstallOps());
519   }
520   CloseCurrentPartition();
521 
522   // In major version 2, we don't add an unused operation to the payload.
523   // If we already extracted the signature we should skip this step.
524   if (manifest_.has_signatures_offset() && manifest_.has_signatures_size() &&
525       signatures_message_data_.empty()) {
526     if (manifest_.signatures_offset() != buffer_offset_) {
527       LOG(ERROR) << "Payload signatures offset points to blob offset "
528                  << manifest_.signatures_offset()
529                  << " but signatures are expected at offset " << buffer_offset_;
530       *error = ErrorCode::kDownloadPayloadVerificationError;
531       return false;
532     }
533     CopyDataToBuffer(&c_bytes, &count, manifest_.signatures_size());
534     // Needs more data to cover entire signature.
535     if (buffer_.size() < manifest_.signatures_size())
536       return true;
537     if (!ExtractSignatureMessage()) {
538       LOG(ERROR) << "Extract payload signature failed.";
539       *error = ErrorCode::kDownloadPayloadVerificationError;
540       return false;
541     }
542     DiscardBuffer(true, 0);
543     // Since we extracted the SignatureMessage we need to advance the
544     // checkpoint, otherwise we would reload the signature and try to extract
545     // it again.
546     // This is the last checkpoint for an update, force this checkpoint to be
547     // saved.
548     CheckpointUpdateProgress(true);
549   }
550 
551   return true;
552 }
553 
554 bool DeltaPerformer::ParseManifest(const char** c_bytes,
555                                    size_t* count,
556                                    ErrorCode* error,
557                                    bool* should_return) {
558   // Read data up to the needed limit; this is either the maximum payload header
559   // size, or the full metadata size (once it becomes known).
560   const bool do_read_header = !IsHeaderParsed();
561   CopyDataToBuffer(
562       c_bytes,
563       count,
564       (do_read_header ? kMaxPayloadHeaderSize
565                       : metadata_size_ + metadata_signature_size_));
566   MetadataParseResult result = ParsePayloadMetadata(buffer_, error);
567   if (result == MetadataParseResult::kError)
568     return false;
569   if (result == MetadataParseResult::kInsufficientData) {
570     // If we just processed the header, make an attempt on the manifest.
571     if (do_read_header && IsHeaderParsed()) {
572       return true;
573     }
574     *should_return = true;
575     return true;
576   }
577 
578   // Checks the integrity of the payload manifest.
579   if ((*error = ValidateManifest()) != ErrorCode::kSuccess)
580     return false;
581   manifest_valid_ = true;
582   if (!install_plan_->is_resume) {
583     auto begin = reinterpret_cast<const char*>(buffer_.data());
584     prefs_->SetString(kPrefsManifestBytes, {begin, buffer_.size()});
585   }
586 
587   // Clear the download buffer.
588   DiscardBuffer(false, metadata_size_);
589 
590   block_size_ = manifest_.block_size();
591 
592   if (!install_plan_->spl_downgrade && !CheckSPLDowngrade()) {
593     *error = ErrorCode::kPayloadTimestampError;
594     return false;
595   }
596 
597   // Update estimate_cow_size if VABC is disabled. Per partition:
598   // new_cow_size = partition_size - (#blocks in COPY operations that belong
599   // to the partition)
600   if (install_plan_->vabc_none) {
601     size_t cowOpsize = android::snapshot::GetCowOpSize(
602         manifest_.dynamic_partition_metadata().cow_version());
603     if (cowOpsize == 0) {
604       cowOpsize = sizeof(android::snapshot::CowOperationV2);
605       LOG(WARNING) << "Failed to determine cow op size for COW version "
606                    << manifest_.dynamic_partition_metadata().cow_version()
607                    << ", defaulting to " << cowOpsize;
608     }
609 
610     LOG(INFO) << "Setting Virtual AB Compression algorithm to none. This "
611                  "would also disable VABC XOR as XOR only saves space if "
612                  "compression is enabled.";
613     manifest_.mutable_dynamic_partition_metadata()->set_vabc_compression_param(
614         "none");
615     for (auto& partition : *manifest_.mutable_partitions()) {
616       if (!partition.has_estimate_cow_size()) {
617         continue;
618       }
619       auto new_cow_size = partition.new_partition_info().size();
620       for (const auto& operation : partition.merge_operations()) {
621         if (operation.type() == CowMergeOperation::COW_COPY) {
622           new_cow_size -=
623               operation.dst_extent().num_blocks() * manifest_.block_size();
624         }
625       }
626       // Remove all COW_XOR merge ops, as XOR without compression is useless.
627       // It increases CPU usage but does not reduce space usage at all.
628       auto&& merge_ops = *partition.mutable_merge_operations();
629       merge_ops.erase(std::remove_if(merge_ops.begin(),
630                                      merge_ops.end(),
631                                      [](const auto& op) {
632                                        return op.type() ==
633                                               CowMergeOperation::COW_XOR;
634                                      }),
635                       merge_ops.end());
636 
637       // Every block written to COW device will come with a header which
638       // stores src/dst block info along with other data.
639       const auto cow_metadata_size = partition.new_partition_info().size() /
640                                      manifest_.block_size() * cowOpsize;
641       // update_engine emits a label op after every op or every two seconds,
642       // whichever takes longer. In the worst case, we add one label per
643       // InstallOp, so take the size of label ops into account.
644       const auto label_ops_size = partition.operations_size() * cowOpsize;
645       // Add an extra 2MB of headroom for any unexpected space usage. If we
646       // overrun the reserved COW size, the entire OTA will fail and there is
647       // no way for the user to retry it.
648       partition.set_estimate_cow_size(new_cow_size + (1024 * 1024 * 2) +
649                                       cow_metadata_size + label_ops_size);
650       // Setting op count max to 0 will defer to num_blocks as the op buffer
651       // size.
652       partition.set_estimate_op_count_max(0);
653       LOG(INFO) << "New COW size for partition " << partition.partition_name()
654                 << " is " << partition.estimate_cow_size();
655     }
656   }
657   if (install_plan_->disable_vabc) {
658     manifest_.mutable_dynamic_partition_metadata()->set_vabc_enabled(false);
659   }
660   if (install_plan_->enable_threading) {
661     manifest_.mutable_dynamic_partition_metadata()
662         ->mutable_vabc_feature_set()
663         ->set_threaded(install_plan_->enable_threading.value());
664     LOG(INFO) << "Attempting to "
665               << (install_plan_->enable_threading.value() ? "enable"
666                                                           : "disable")
667               << " multi-threaded compression for VABC";
668   }
669   if (install_plan_->batched_writes) {
670     manifest_.mutable_dynamic_partition_metadata()
671         ->mutable_vabc_feature_set()
672         ->set_batch_writes(true);
673     LOG(INFO) << "Attempting to enable batched writes for VABC";
674   }
675 
676   // This populates |partitions_| and the |install_plan.partitions| with the
677   // list of partitions from the manifest.
678   if (!ParseManifestPartitions(error))
679     return false;
680 
681   // |install_plan.partitions| was filled in; nothing needs to be done here if
682   // the payload was already applied. Return false to terminate the http fetcher,
683   // but keep |error| as ErrorCode::kSuccess.
684   if (payload_->already_applied)
685     return false;
686 
687   num_total_operations_ = 0;
688   for (const auto& partition : partitions_) {
689     num_total_operations_ += partition.operations_size();
690     acc_num_operations_.push_back(num_total_operations_);
691   }
692 
693   LOG_IF(WARNING, !prefs_->SetInt64(kPrefsManifestMetadataSize, metadata_size_))
694       << "Unable to save the manifest metadata size.";
695   LOG_IF(
696       WARNING,
697       !prefs_->SetInt64(kPrefsManifestSignatureSize, metadata_signature_size_))
698       << "Unable to save the manifest signature size.";
699 
700   if (!PrimeUpdateState()) {
701     *error = ErrorCode::kDownloadStateInitializationError;
702     LOG(ERROR) << "Unable to prime the update state.";
703     return false;
704   }
705 
706   if (next_operation_num_ < acc_num_operations_[current_partition_]) {
707     if (!OpenCurrentPartition()) {
708       *error = ErrorCode::kInstallDeviceOpenError;
709       return false;
710     }
711   }
712 
713   if (next_operation_num_ > 0)
714     UpdateOverallProgress(true, "Resuming after ");
715   LOG(INFO) << "Starting to apply update payload operations";
716   return true;
717 }
718 bool DeltaPerformer::ProcessOperation(const InstallOperation* op,
719                                       ErrorCode* error) {
720   // Validate the operation unconditionally. This helps prevent the
721   // exploitation of vulnerabilities in the patching libraries, e.g. bspatch.
722   // The hash of the patch data for a given operation is embedded in the
723   // payload metadata; and thus has been verified against the public key on
724   // device.
725   // Note: Validate must be called only if CanPerformInstallOperation is
726   // called. Otherwise, we might fail operations simply because there isn't
727   // yet sufficient data to compute the proper hash.
728   *error = ValidateOperationHash(*op);
729   if (*error != ErrorCode::kSuccess) {
730     if (install_plan_->hash_checks_mandatory) {
731       LOG(ERROR) << "Mandatory operation hash check failed";
732       return false;
733     }
734 
735     // For non-mandatory cases, just send a UMA stat.
736     LOG(WARNING) << "Ignoring operation validation errors";
737     *error = ErrorCode::kSuccess;
738   }
739 
740   // Makes sure we unblock exit when this operation completes.
741   ScopedTerminatorExitUnblocker exit_unblocker =
742       ScopedTerminatorExitUnblocker();  // Avoids a compiler unused var bug.
743 
744   base::TimeTicks op_start_time = base::TimeTicks::Now();
745 
746   bool op_result{};
747   const string op_name = InstallOperationTypeName(op->type());
748   switch (op->type()) {
749     case InstallOperation::REPLACE:
750     case InstallOperation::REPLACE_BZ:
751     case InstallOperation::REPLACE_XZ:
752       op_result = PerformReplaceOperation(*op);
753       OP_DURATION_HISTOGRAM("REPLACE", op_start_time);
754       break;
755     case InstallOperation::ZERO:
756     case InstallOperation::DISCARD:
757       op_result = PerformZeroOrDiscardOperation(*op);
758       OP_DURATION_HISTOGRAM("ZERO_OR_DISCARD", op_start_time);
759       break;
760     case InstallOperation::SOURCE_COPY:
761       op_result = PerformSourceCopyOperation(*op, error);
762       OP_DURATION_HISTOGRAM("SOURCE_COPY", op_start_time);
763       break;
764     case InstallOperation::SOURCE_BSDIFF:
765     case InstallOperation::BROTLI_BSDIFF:
766     case InstallOperation::PUFFDIFF:
767     case InstallOperation::ZUCCHINI:
768     case InstallOperation::LZ4DIFF_PUFFDIFF:
769     case InstallOperation::LZ4DIFF_BSDIFF:
770       op_result = PerformDiffOperation(*op, error);
771       OP_DURATION_HISTOGRAM(op_name, op_start_time);
772       break;
773     default:
774       op_result = false;
775   }
776   if (!HandleOpResult(op_result, op_name.c_str(), error))
777     return false;
778 
779   return true;
780 }
781 
782 bool DeltaPerformer::IsManifestValid() {
783   return manifest_valid_;
784 }
785 
786 bool DeltaPerformer::ParseManifestPartitions(ErrorCode* error) {
787   partitions_.assign(manifest_.partitions().begin(),
788                      manifest_.partitions().end());
789 
790   // For VAB and partial updates, the partition preparation will copy the
791   // dynamic partitions metadata to the target metadata slot, and rename the
792   // slot suffix of the partitions in the metadata.
793   if (install_plan_->target_slot != BootControlInterface::kInvalidSlot) {
794     uint64_t required_size = 0;
795     if (!PreparePartitionsForUpdate(&required_size, error)) {
796       if (*error == ErrorCode::kOverlayfsenabledError) {
797         return false;
798       } else if (required_size > 0) {
799         *error = ErrorCode::kNotEnoughSpace;
800       } else {
801         *error = ErrorCode::kInstallDeviceOpenError;
802       }
803       return false;
804     }
805   }
806 
807   // Partitions in manifest are no longer needed after preparing partitions.
808   manifest_.clear_partitions();
809   // TODO(xunchang) TBD: allow partial update only on devices with dynamic
810   // partition.
811   if (manifest_.partial_update()) {
812     std::set<std::string> touched_partitions;
813     for (const auto& partition_update : partitions_) {
814       touched_partitions.insert(partition_update.partition_name());
815     }
816 
817     auto generator = partition_update_generator::Create(boot_control_,
818                                                         manifest_.block_size());
819     std::vector<PartitionUpdate> untouched_static_partitions;
820     if (!generator->GenerateOperationsForPartitionsNotInPayload(
821             install_plan_->source_slot,
822             install_plan_->target_slot,
823             touched_partitions,
824             &untouched_static_partitions)) {
825       LOG(ERROR)
826           << "Failed to generate operations for partitions not in payload "
827           << android::base::Join(touched_partitions, ", ");
828       *error = ErrorCode::kDownloadStateInitializationError;
829       return false;
830     }
831     partitions_.insert(partitions_.end(),
832                        untouched_static_partitions.begin(),
833                        untouched_static_partitions.end());
834 
835     // Save the untouched dynamic partitions in install plan.
836     std::vector<std::string> dynamic_partitions;
837     if (!boot_control_->GetDynamicPartitionControl()
838              ->ListDynamicPartitionsForSlot(install_plan_->source_slot,
839                                             boot_control_->GetCurrentSlot(),
840                                             &dynamic_partitions)) {
841       LOG(ERROR) << "Failed to load dynamic partitions from slot "
842                  << install_plan_->source_slot;
843       return false;
844     }
845     install_plan_->untouched_dynamic_partitions.clear();
846     for (const auto& name : dynamic_partitions) {
847       if (touched_partitions.find(name) == touched_partitions.end()) {
848         install_plan_->untouched_dynamic_partitions.push_back(name);
849       }
850     }
851   }
852 
853   const auto start = std::chrono::system_clock::now();
854   if (!install_plan_->ParsePartitions(
855           partitions_, boot_control_, block_size_, error)) {
856     return false;
857   }
858   const auto duration = std::chrono::system_clock::now() - start;
859   LOG(INFO)
860       << "ParsePartitions done. took "
861       << std::chrono::duration_cast<std::chrono::milliseconds>(duration).count()
862       << " ms";
863 
864   auto&& has_verity = [](const auto& part) {
865     return part.fec_extent().num_blocks() > 0 ||
866            part.hash_tree_extent().num_blocks() > 0;
867   };
868   if (!std::any_of(partitions_.begin(), partitions_.end(), has_verity)) {
869     install_plan_->write_verity = false;
870   }
871 
872   LogPartitionInfo(partitions_);
873   return true;
874 }
875 
876 bool DeltaPerformer::PreparePartitionsForUpdate(uint64_t* required_size,
877                                                 ErrorCode* error) {
878   // Call static PreparePartitionsForUpdate with hash from
879   // kPrefsUpdateCheckResponseHash to ensure hash of payload that space is
880   // preallocated for is the same as the hash of payload being applied.
881   string update_check_response_hash;
882   ignore_result(prefs_->GetString(kPrefsUpdateCheckResponseHash,
883                                   &update_check_response_hash));
884   return PreparePartitionsForUpdate(prefs_,
885                                     boot_control_,
886                                     install_plan_->target_slot,
887                                     manifest_,
888                                     update_check_response_hash,
889                                     required_size,
890                                     error);
891 }
892 
893 bool DeltaPerformer::PreparePartitionsForUpdate(
894     PrefsInterface* prefs,
895     BootControlInterface* boot_control,
896     BootControlInterface::Slot target_slot,
897     const DeltaArchiveManifest& manifest,
898     const std::string& update_check_response_hash,
899     uint64_t* required_size,
900     ErrorCode* error) {
901   string last_hash;
902   ignore_result(
903       prefs->GetString(kPrefsDynamicPartitionMetadataUpdated, &last_hash));
904 
905   bool is_resume = !update_check_response_hash.empty() &&
906                    last_hash == update_check_response_hash;
907 
908   if (is_resume) {
909     LOG(INFO) << "Using previously prepared partitions for update. hash = "
910               << last_hash;
911   } else {
912     LOG(INFO) << "Preparing partitions for new update. last hash = "
913               << last_hash << ", new hash = " << update_check_response_hash;
914     ResetUpdateProgress(prefs, false);
915   }
916 
917   const auto start = std::chrono::system_clock::now();
918   if (!boot_control->GetDynamicPartitionControl()->PreparePartitionsForUpdate(
919           boot_control->GetCurrentSlot(),
920           target_slot,
921           manifest,
922           !is_resume /* should update */,
923           required_size,
924           error)) {
925     LOG(ERROR) << "Unable to initialize partition metadata for slot "
926                << BootControlInterface::SlotName(target_slot) << " "
927                << utils::ErrorCodeToString(*error);
928     return false;
929   }
930   const auto duration = std::chrono::system_clock::now() - start;
931 
932   TEST_AND_RETURN_FALSE(prefs->SetString(kPrefsDynamicPartitionMetadataUpdated,
933                                          update_check_response_hash));
934   LOG(INFO)
935       << "PreparePartitionsForUpdate done. took "
936       << std::chrono::duration_cast<std::chrono::milliseconds>(duration).count()
937       << " ms";
938 
939   return true;
940 }
941 
942 bool DeltaPerformer::CanPerformInstallOperation(
943     const chromeos_update_engine::InstallOperation& operation) {
944   // If we don't have a data blob we can apply it right away.
945   if (!operation.has_data_offset() && !operation.has_data_length())
946     return true;
947 
948   // See if we have the entire data blob in the buffer
949   if (operation.data_offset() < buffer_offset_) {
950     LOG(ERROR) << "we threw away data it seems?";
951     return false;
952   }
953 
954   return (operation.data_offset() + operation.data_length() <=
955           buffer_offset_ + buffer_.size());
956 }
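// Example of the window check above: with buffer_offset_ == 4096 and
// buffer_.size() == 8192, an operation with data_offset() == 6144 and
// data_length() == 2048 is performable (6144 + 2048 <= 12288), whereas one
// with data_length() == 8192 is not yet (14336 > 12288).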
957 
958 bool DeltaPerformer::PerformReplaceOperation(
959     const InstallOperation& operation) {
960   CHECK(operation.type() == InstallOperation::REPLACE ||
961         operation.type() == InstallOperation::REPLACE_BZ ||
962         operation.type() == InstallOperation::REPLACE_XZ);
963 
964   // Since we delete data off the beginning of the buffer as we use it,
965   // the data we need should be exactly at the beginning of the buffer.
966   TEST_AND_RETURN_FALSE(buffer_.size() >= operation.data_length());
967 
968   TEST_AND_RETURN_FALSE(partition_writer_->PerformReplaceOperation(
969       operation, buffer_.data(), buffer_.size()));
970   // Update buffer
971   DiscardBuffer(true, buffer_.size());
972   return true;
973 }
974 
975 bool DeltaPerformer::PerformZeroOrDiscardOperation(
976     const InstallOperation& operation) {
977   CHECK(operation.type() == InstallOperation::DISCARD ||
978         operation.type() == InstallOperation::ZERO);
979 
980   // These operations have no blob.
981   TEST_AND_RETURN_FALSE(!operation.has_data_offset());
982   TEST_AND_RETURN_FALSE(!operation.has_data_length());
983 
984   return partition_writer_->PerformZeroOrDiscardOperation(operation);
985 }
986 
987 bool DeltaPerformer::PerformSourceCopyOperation(
988     const InstallOperation& operation, ErrorCode* error) {
989   if (operation.has_src_length())
990     TEST_AND_RETURN_FALSE(operation.src_length() % block_size_ == 0);
991   if (operation.has_dst_length())
992     TEST_AND_RETURN_FALSE(operation.dst_length() % block_size_ == 0);
993   return partition_writer_->PerformSourceCopyOperation(operation, error);
994 }
995 
996 bool DeltaPerformer::ExtentsToBsdiffPositionsString(
997     const RepeatedPtrField<Extent>& extents,
998     uint64_t block_size,
999     uint64_t full_length,
1000     string* positions_string) {
1001   string ret;
1002   uint64_t length = 0;
1003   for (const Extent& extent : extents) {
1004     int64_t start = extent.start_block() * block_size;
1005     uint64_t this_length =
1006         min(full_length - length,
1007             static_cast<uint64_t>(extent.num_blocks()) * block_size);
1008     ret += android::base::StringPrintf(
1009         "%" PRIi64 ":%" PRIu64 ",", start, this_length);
1010     length += this_length;
1011   }
1012   TEST_AND_RETURN_FALSE(length == full_length);
1013   if (!ret.empty())
1014     ret.resize(ret.size() - 1);  // Strip trailing comma off
1015   *positions_string = ret;
1016   return true;
1017 }
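// Example of the "offset:length" string built above: extents
// {start_block: 2, num_blocks: 1} and {start_block: 5, num_blocks: 2} with a
// 4096-byte block size and full_length == 12288 yield "8192:4096,20480:8192".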
1018 
1019 bool DeltaPerformer::PerformDiffOperation(const InstallOperation& operation,
1020                                           ErrorCode* error) {
1021   // Since we delete data off the beginning of the buffer as we use it,
1022   // the data we need should be exactly at the beginning of the buffer.
1023   TEST_AND_RETURN_FALSE(buffer_offset_ == operation.data_offset());
1024   TEST_AND_RETURN_FALSE(buffer_.size() >= operation.data_length());
1025   if (operation.has_src_length())
1026     TEST_AND_RETURN_FALSE(operation.src_length() % block_size_ == 0);
1027   if (operation.has_dst_length())
1028     TEST_AND_RETURN_FALSE(operation.dst_length() % block_size_ == 0);
1029 
1030   TEST_AND_RETURN_FALSE(partition_writer_->PerformDiffOperation(
1031       operation, error, buffer_.data(), buffer_.size()));
1032   DiscardBuffer(true, buffer_.size());
1033   return true;
1034 }
1035 
1036 bool DeltaPerformer::ExtractSignatureMessage() {
1037   TEST_AND_RETURN_FALSE(signatures_message_data_.empty());
1038   TEST_AND_RETURN_FALSE(buffer_offset_ == manifest_.signatures_offset());
1039   TEST_AND_RETURN_FALSE(buffer_.size() >= manifest_.signatures_size());
1040   signatures_message_data_.assign(
1041       buffer_.begin(), buffer_.begin() + manifest_.signatures_size());
1042 
1043   LOG(INFO) << "Extracted signature data of size "
1044             << manifest_.signatures_size() << " at "
1045             << manifest_.signatures_offset();
1046   return true;
1047 }
1048 
1049 bool DeltaPerformer::GetPublicKey(string* out_public_key) {
1050   out_public_key->clear();
1051 
1052   if (utils::FileExists(public_key_path_.c_str())) {
1053     LOG(INFO) << "Verifying using public key: " << public_key_path_;
1054     return utils::ReadFile(public_key_path_, out_public_key);
1055   }
1056 
1057   // If this is an official build then we are not allowed to use the public
1058   // key from the Omaha response.
1059   if (!hardware_->IsOfficialBuild() && !install_plan_->public_key_rsa.empty()) {
1060     LOG(INFO) << "Verifying using public key from Omaha response.";
1061     return brillo::data_encoding::Base64Decode(install_plan_->public_key_rsa,
1062                                                out_public_key);
1063   }
1064   LOG(INFO) << "No public keys found for verification.";
1065   return true;
1066 }
1067 
1068 std::pair<std::unique_ptr<PayloadVerifier>, bool>
1069 DeltaPerformer::CreatePayloadVerifier() {
1070   if (utils::FileExists(update_certificates_path_.c_str())) {
1071     LOG(INFO) << "Verifying using certificates: " << update_certificates_path_;
1072     return {
1073         PayloadVerifier::CreateInstanceFromZipPath(update_certificates_path_),
1074         true};
1075   }
1076 
1077   string public_key;
1078   if (!GetPublicKey(&public_key)) {
1079     LOG(ERROR) << "Failed to read public key";
1080     return {nullptr, true};
1081   }
1082 
1083   // Skips the verification if the public key is empty.
1084   if (public_key.empty()) {
1085     return {nullptr, false};
1086   }
1087   LOG(INFO) << "Verifying using public key: " << public_key;
1088   return {PayloadVerifier::CreateInstance(public_key), true};
1089 }
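// The pair returned above is (verifier, perform_verification): a null
// verifier with perform_verification == false means verification is
// intentionally skipped (no public key configured), while a null verifier
// with perform_verification == true signals a failure to create the verifier.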
1090 
1091 ErrorCode DeltaPerformer::ValidateManifest() {
1092   // Perform assorted checks to validate the manifest: make sure it matches
1093   // data from other sources, and that it is a supported version.
1094   bool has_old_fields = std::any_of(manifest_.partitions().begin(),
1095                                     manifest_.partitions().end(),
1096                                     [](const PartitionUpdate& partition) {
1097                                       return partition.has_old_partition_info();
1098                                     });
1099 
1100   // The presence of an old partition hash is the sole indicator for a delta
1101   // update. Also, always treat the partial update as delta so that we can
1102   // perform the minor version check correctly.
1103   InstallPayloadType actual_payload_type =
1104       (has_old_fields || manifest_.partial_update())
1105           ? InstallPayloadType::kDelta
1106           : InstallPayloadType::kFull;
1107 
1108   if (payload_->type == InstallPayloadType::kUnknown) {
1109     LOG(INFO) << "Detected a '"
1110               << InstallPayloadTypeToString(actual_payload_type)
1111               << "' payload.";
1112     payload_->type = actual_payload_type;
1113   } else if (payload_->type != actual_payload_type) {
1114     LOG(ERROR) << "InstallPlan expected a '"
1115                << InstallPayloadTypeToString(payload_->type)
1116                << "' payload but the downloaded manifest contains a '"
1117                << InstallPayloadTypeToString(actual_payload_type)
1118                << "' payload.";
1119     return ErrorCode::kPayloadMismatchedType;
1120   }
1121   // Check that the minor version is compatible.
1122   // TODO(xunchang) increment minor version & add check for partial update
1123   if (actual_payload_type == InstallPayloadType::kFull) {
1124     if (manifest_.minor_version() != kFullPayloadMinorVersion) {
1125       LOG(ERROR) << "Manifest contains minor version "
1126                  << manifest_.minor_version()
1127                  << ", but all full payloads should have version "
1128                  << kFullPayloadMinorVersion << ".";
1129       return ErrorCode::kUnsupportedMinorPayloadVersion;
1130     }
1131   } else {
1132     if (manifest_.minor_version() < kMinSupportedMinorPayloadVersion ||
1133         manifest_.minor_version() > kMaxSupportedMinorPayloadVersion) {
1134       LOG(ERROR) << "Manifest contains minor version "
1135                  << manifest_.minor_version()
1136                  << " not in the range of supported minor versions ["
1137                  << kMinSupportedMinorPayloadVersion << ", "
1138                  << kMaxSupportedMinorPayloadVersion << "].";
1139       return ErrorCode::kUnsupportedMinorPayloadVersion;
1140     }
1141   }
1142 
1143   ErrorCode error_code = CheckTimestampError();
1144   if (error_code != ErrorCode::kSuccess) {
1145     if (error_code == ErrorCode::kPayloadTimestampError) {
1146       if (!hardware_->AllowDowngrade()) {
1147         return ErrorCode::kPayloadTimestampError;
1148       }
1149       LOG(INFO) << "The current OS build allows downgrade, continuing to apply"
1150                    " the payload with an older timestamp.";
1151     } else {
1152       LOG(ERROR) << "Timestamp check returned "
1153                  << utils::ErrorCodeToString(error_code);
1154       return error_code;
1155     }
1156   }
1157 
1158   // TODO(crbug.com/37661) we should be adding more and more manifest checks,
1159   // such as partition boundaries, etc.
1160 
1161   return ErrorCode::kSuccess;
1162 }
1163 
1164 ErrorCode DeltaPerformer::CheckTimestampError() const {
1165   bool is_partial_update =
1166       manifest_.has_partial_update() && manifest_.partial_update();
1167   const auto& partitions = manifest_.partitions();
1168 
1169   // Check version field for a given PartitionUpdate object. If an error
1170   // is encountered, set |error_code| accordingly. If downgrade is detected,
1171   // |downgrade_detected| is set. Return true if the program should continue
1172   // to check the next partition, or false if it should exit early due to
1173   // errors.
1174   auto&& timestamp_valid = [this](const PartitionUpdate& partition,
1175                                   bool allow_empty_version,
1176                                   bool* downgrade_detected) -> ErrorCode {
1177     const auto& partition_name = partition.partition_name();
1178     if (!partition.has_version()) {
1179       if (hardware_->GetVersionForLogging(partition_name).empty()) {
1180         LOG(INFO) << partition_name << " doesn't have a version, skipping "
1181                   << "downgrade check.";
1182         return ErrorCode::kSuccess;
1183       }
1184 
1185       if (allow_empty_version) {
1186         return ErrorCode::kSuccess;
1187       }
1188       LOG(ERROR)
1189           << "PartitionUpdate " << partition_name
1190           << " doesn't have a version field. Not allowed in partial updates.";
1191       return ErrorCode::kDownloadManifestParseError;
1192     }
1193 
1194     auto error_code =
1195         hardware_->IsPartitionUpdateValid(partition_name, partition.version());
1196     switch (error_code) {
1197       case ErrorCode::kSuccess:
1198         break;
1199       case ErrorCode::kPayloadTimestampError:
1200         *downgrade_detected = true;
1201         LOG(WARNING) << "PartitionUpdate " << partition_name
1202                      << " has an older version than partition on device.";
1203         break;
1204       default:
1205         LOG(ERROR) << "IsPartitionUpdateValid(" << partition_name
1206                    << ") returned " << utils::ErrorCodeToString(error_code);
1207         break;
1208     }
1209     return error_code;
1210   };
1211 
1212   bool downgrade_detected = false;
1213 
1214   if (is_partial_update) {
1215     // For partial updates, all partitions MUST have valid timestamps,
1216     // but max_timestamp can be empty.
1217     for (const auto& partition : partitions) {
1218       auto error_code = timestamp_valid(
1219           partition, false /* allow_empty_version */, &downgrade_detected);
1220       if (error_code != ErrorCode::kSuccess &&
1221           error_code != ErrorCode::kPayloadTimestampError) {
1222         return error_code;
1223       }
1224     }
1225     if (downgrade_detected) {
1226       return ErrorCode::kPayloadTimestampError;
1227     }
1228     return ErrorCode::kSuccess;
1229   }
1230 
1231   // For non-partial updates, check max_timestamp first.
1232   if (manifest_.max_timestamp() < hardware_->GetBuildTimestamp()) {
1233     LOG(ERROR) << "The current OS build timestamp ("
1234                << hardware_->GetBuildTimestamp()
1235                << ") is newer than the maximum timestamp in the manifest ("
1236                << manifest_.max_timestamp() << ")";
1237     return ErrorCode::kPayloadTimestampError;
1238   }
1239   // Otherwise... partitions can have empty timestamps.
1240   for (const auto& partition : partitions) {
1241     auto error_code = timestamp_valid(
1242         partition, true /* allow_empty_version */, &downgrade_detected);
1243     if (error_code != ErrorCode::kSuccess &&
1244         error_code != ErrorCode::kPayloadTimestampError) {
1245       return error_code;
1246     }
1247   }
1248   if (downgrade_detected) {
1249     return ErrorCode::kPayloadTimestampError;
1250   }
1251   return ErrorCode::kSuccess;
1252 }
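// Illustration of the policy above: for a partial update, a partition with an
// older version than the one on the device does not abort the loop
// immediately; every partition is still checked, and kPayloadTimestampError
// is returned only afterwards. For a full update, an out-of-date
// max_timestamp fails fast, before any per-partition version is examined.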
1253 
1254 ErrorCode DeltaPerformer::ValidateOperationHash(
1255     const InstallOperation& operation) {
1256   if (!operation.data_sha256_hash().size()) {
1257     if (!operation.data_length()) {
1258       // Operations that do not have any data blob won't have any operation
1259       // hash either. So, these operations are always considered validated
1260       // since the metadata that contains all the non-data-blob portions of
1261       // the operation has already been validated. This is true for both HTTP
1262       // and HTTPS cases.
1263       return ErrorCode::kSuccess;
1264     }
1265 
1266     // No hash is present for an operation that has data blobs. This shouldn't
1267     // happen normally for any client that has this code, because the
1268     // corresponding update should have been produced with the operation
1269     // hashes. So if it happens it means either we've turned operation hash
1270     // generation off in DeltaDiffGenerator or it's a regression of some sort.
1271     // One caveat though: the last operation is an unused signature operation
1272     // that doesn't have a hash at the time the manifest is created, so we
1273     // should not complain about that operation. This operation can be
1274     // recognized by the fact that its offset is mentioned in the manifest.
1275     if (manifest_.signatures_offset() &&
1276         manifest_.signatures_offset() == operation.data_offset()) {
1277       LOG(INFO) << "Skipping hash verification for signature operation "
1278                 << next_operation_num_ + 1;
1279     } else {
1280       if (install_plan_->hash_checks_mandatory) {
1281         LOG(ERROR) << "Missing mandatory operation hash for operation "
1282                    << next_operation_num_ + 1;
1283         return ErrorCode::kDownloadOperationHashMissingError;
1284       }
1285 
1286       LOG(WARNING) << "Cannot validate operation " << next_operation_num_ + 1
1287                    << " as there's no operation hash in manifest";
1288     }
1289     return ErrorCode::kSuccess;
1290   }
1291 
1292   brillo::Blob expected_op_hash;
1293   expected_op_hash.assign(operation.data_sha256_hash().data(),
1294                           (operation.data_sha256_hash().data() +
1295                            operation.data_sha256_hash().size()));
1296 
1297   brillo::Blob calculated_op_hash;
1298   if (!HashCalculator::RawHashOfBytes(
1299           buffer_.data(), operation.data_length(), &calculated_op_hash)) {
1300     LOG(ERROR) << "Unable to compute actual hash of operation "
1301                << next_operation_num_;
1302     return ErrorCode::kDownloadOperationHashVerificationError;
1303   }
1304 
1305   if (calculated_op_hash != expected_op_hash) {
1306     LOG(ERROR) << "Hash verification failed for operation "
1307                << next_operation_num_
1308                << ". Expected hash = " << HexEncode(expected_op_hash);
1309     LOG(ERROR) << "Calculated hash over " << operation.data_length()
1310                << " bytes at offset: " << operation.data_offset() << " = "
1311                << HexEncode(calculated_op_hash);
1312     return ErrorCode::kDownloadOperationHashMismatch;
1313   }
1314 
1315   return ErrorCode::kSuccess;
1316 }
1317 
1318 #define TEST_AND_RETURN_VAL(_retval, _condition)              \
1319   do {                                                        \
1320     if (!(_condition)) {                                      \
1321       LOG(ERROR) << "VerifyPayload failure: " << #_condition; \
1322       return _retval;                                         \
1323     }                                                         \
1324   } while (0);
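// As a usage sketch, a call such as
//   TEST_AND_RETURN_VAL(ErrorCode::kDownloadPayloadVerificationError,
//                       !payload_hash_calculator_.raw_hash().empty());
// expands roughly to
//   do {
//     if (!(!payload_hash_calculator_.raw_hash().empty())) {
//       LOG(ERROR) << "VerifyPayload failure: "
//                  << "!payload_hash_calculator_.raw_hash().empty()";
//       return ErrorCode::kDownloadPayloadVerificationError;
//     }
//   } while (0);
// Note that the semicolon inside the macro body means every call site also
// leaves an extra empty statement behind.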
1325 
1326 ErrorCode DeltaPerformer::VerifyPayload(
1327     const brillo::Blob& update_check_response_hash,
1328     const uint64_t update_check_response_size) {
1329   // Verifies the download size.
1330   if (update_check_response_size !=
1331       metadata_size_ + metadata_signature_size_ + buffer_offset_) {
1332     LOG(ERROR) << "update_check_response_size (" << update_check_response_size
1333                << ") doesn't match metadata_size (" << metadata_size_
1334                << ") + metadata_signature_size (" << metadata_signature_size_
1335                << ") + buffer_offset (" << buffer_offset_ << ").";
1336     return ErrorCode::kPayloadSizeMismatchError;
1337   }
1338 
1339   // Verifies the payload hash.
1340   TEST_AND_RETURN_VAL(ErrorCode::kDownloadPayloadVerificationError,
1341                       !payload_hash_calculator_.raw_hash().empty());
1342   if (payload_hash_calculator_.raw_hash() != update_check_response_hash) {
1343     LOG(ERROR) << "Actual hash: "
1344                << HexEncode(payload_hash_calculator_.raw_hash())
1345                << ", expected hash: " << HexEncode(update_check_response_hash);
1346     return ErrorCode::kPayloadHashMismatchError;
1347   }
1348 
1349   // NOLINTNEXTLINE(whitespace/braces)
1350   auto [payload_verifier, perform_verification] = CreatePayloadVerifier();
1351   if (!perform_verification) {
1352     LOG(WARNING) << "Not verifying signed delta payload -- missing public key.";
1353     return ErrorCode::kSuccess;
1354   }
1355   if (!payload_verifier) {
1356     LOG(ERROR) << "Failed to create the payload verifier.";
1357     return ErrorCode::kDownloadPayloadPubKeyVerificationError;
1358   }
1359 
1360   TEST_AND_RETURN_VAL(ErrorCode::kSignedDeltaPayloadExpectedError,
1361                       !signatures_message_data_.empty());
1362   brillo::Blob hash_data = signed_hash_calculator_.raw_hash();
1363   TEST_AND_RETURN_VAL(ErrorCode::kDownloadPayloadPubKeyVerificationError,
1364                       hash_data.size() == kSHA256Size);
1365 
1366   if (!payload_verifier->VerifySignature(signatures_message_data_, hash_data)) {
1367     // The autoupdate_CatchBadSignatures test checks for this string
1368     // in log-files. Keep in sync.
1369     LOG(ERROR) << "Public key verification failed, thus update failed.";
1370     return ErrorCode::kDownloadPayloadPubKeyVerificationError;
1371   }
1372 
1373   LOG(INFO) << "Payload hash matches value in payload.";
1374   return ErrorCode::kSuccess;
1375 }
1376 
1377 void DeltaPerformer::DiscardBuffer(bool do_advance_offset,
1378                                    size_t signed_hash_buffer_size) {
1379   // Update the buffer offset.
1380   if (do_advance_offset)
1381     buffer_offset_ += buffer_.size();
1382 
1383   // Hash the content.
1384   payload_hash_calculator_.Update(buffer_.data(), buffer_.size());
1385   signed_hash_calculator_.Update(buffer_.data(), signed_hash_buffer_size);
1386 
1387   // Swap content with an empty vector to ensure that all memory is released.
1388   brillo::Blob().swap(buffer_);
1389 }
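// The swap above is the standard way to force deallocation: clear() would
// reset the size but keep the allocated capacity, whereas swapping with a
// temporary releases it. A minimal sketch of the difference, assuming a plain
// std::vector<uint8_t>:
//   std::vector<uint8_t> buf(16 * 1024 * 1024);
//   buf.clear();                        // size() == 0, capacity() unchanged
//   std::vector<uint8_t>().swap(buf);   // size() == 0, storage released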
1390 
1391 bool DeltaPerformer::CanResumeUpdate(PrefsInterface* prefs,
1392                                      const string& update_check_response_hash) {
1393   int64_t next_operation = kUpdateStateOperationInvalid;
1394   if (!(prefs->GetInt64(kPrefsUpdateStateNextOperation, &next_operation) &&
1395         next_operation != kUpdateStateOperationInvalid && next_operation > 0)) {
1396     LOG(WARNING) << "Failed to resume update " << kPrefsUpdateStateNextOperation
1397                  << " invalid: " << next_operation;
1398     return false;
1399   }
1400 
1401   string interrupted_hash;
1402   if (!(prefs->GetString(kPrefsUpdateCheckResponseHash, &interrupted_hash) &&
1403         !interrupted_hash.empty() &&
1404         interrupted_hash == update_check_response_hash)) {
1405     LOG(WARNING) << "Failed to resume update " << kPrefsUpdateCheckResponseHash
1406                  << " mismatch, last hash: " << interrupted_hash
1407                  << ", current hash: " << update_check_response_hash << "";
1408     return false;
1409   }
1410 
1411   int64_t resumed_update_failures{};
1412   // Note that storing this value is optional, but if it is present it must
1413   // not exceed the limit.
1414   if (prefs->GetInt64(kPrefsResumedUpdateFailures, &resumed_update_failures) &&
1415       resumed_update_failures > kMaxResumedUpdateFailures) {
1416     LOG(WARNING) << "Failed to resume update " << kPrefsResumedUpdateFailures
1417                  << " has value " << resumed_update_failures
1418                  << " is over the limit " << kMaxResumedUpdateFailures;
1419     return false;
1420   }
1421 
1422   // Validate the rest of the resume state.
1423   int64_t next_data_offset = -1;
1424   if (!(prefs->GetInt64(kPrefsUpdateStateNextDataOffset, &next_data_offset) &&
1425         next_data_offset >= 0)) {
1426     LOG(WARNING) << "Failed to resume update "
1427                  << kPrefsUpdateStateNextDataOffset
1428                  << " invalid: " << next_data_offset;
1429     return false;
1430   }
1431 
1432   string sha256_context;
1433   if (!(prefs->GetString(kPrefsUpdateStateSHA256Context, &sha256_context) &&
1434         !sha256_context.empty())) {
1435     LOG(WARNING) << "Failed to resume update " << kPrefsUpdateStateSHA256Context
1436                  << " is empty.";
1437     return false;
1438   }
1439 
1440   int64_t manifest_metadata_size = 0;
1441   if (!(prefs->GetInt64(kPrefsManifestMetadataSize, &manifest_metadata_size) &&
1442         manifest_metadata_size > 0)) {
1443     LOG(WARNING) << "Failed to resume update " << kPrefsManifestMetadataSize
1444                  << " invalid: " << manifest_metadata_size;
1445     return false;
1446   }
1447 
1448   int64_t manifest_signature_size = 0;
1449   if (!(prefs->GetInt64(kPrefsManifestSignatureSize,
1450                         &manifest_signature_size) &&
1451         manifest_signature_size >= 0)) {
1452     LOG(WARNING) << "Failed to resume update " << kPrefsManifestSignatureSize
1453                  << " invalid: " << manifest_signature_size;
1454     return false;
1455   }
1456 
1457   return true;
1458 }
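// In short, resuming requires a consistent set of saved prefs: a positive
// next-operation index, an update-check response hash matching the
// interrupted payload, a resumed-failure count at or below
// kMaxResumedUpdateFailures, a non-negative data offset, a non-empty SHA-256
// context, a positive manifest metadata size, and a non-negative manifest
// signature size. If any of these checks fails, CanResumeUpdate() returns
// false.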
1459 
1460 bool DeltaPerformer::ResetUpdateProgress(
1461     PrefsInterface* prefs,
1462     bool quick,
1463     bool skip_dynamic_partititon_metadata_updated) {
1464   TEST_AND_RETURN_FALSE(prefs->SetInt64(kPrefsUpdateStateNextOperation,
1465                                         kUpdateStateOperationInvalid));
1466   if (!quick) {
1467     prefs->SetInt64(kPrefsUpdateStateNextDataOffset, -1);
1468     prefs->SetInt64(kPrefsUpdateStateNextDataLength, 0);
1469     prefs->SetString(kPrefsUpdateStateSHA256Context, "");
1470     prefs->SetString(kPrefsUpdateStateSignedSHA256Context, "");
1471     prefs->SetString(kPrefsUpdateStateSignatureBlob, "");
1472     prefs->SetInt64(kPrefsManifestMetadataSize, -1);
1473     prefs->SetInt64(kPrefsManifestSignatureSize, -1);
1474     prefs->SetInt64(kPrefsResumedUpdateFailures, 0);
1475     prefs->Delete(kPrefsPostInstallSucceeded);
1476     prefs->Delete(kPrefsVerityWritten);
1477     if (!skip_dynamic_partititon_metadata_updated) {
1478       LOG(INFO) << "Resetting recorded hash for prepared partitions.";
1479       prefs->Delete(kPrefsDynamicPartitionMetadataUpdated);
1480     }
1481   }
1482   return true;
1483 }
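// A quick reset only invalidates the saved next-operation index; a full reset
// (quick == false) additionally clears the data offset, hash contexts,
// signature blob, manifest sizes, and failure counter, drops the post-install
// and verity markers, and, unless skipped, the dynamic partition metadata
// flag as well.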
1484 
1485 bool DeltaPerformer::ShouldCheckpoint() {
1486   base::TimeTicks curr_time = base::TimeTicks::Now();
1487   if (curr_time > update_checkpoint_time_) {
1488     update_checkpoint_time_ = curr_time + update_checkpoint_wait_;
1489     return true;
1490   }
1491   return false;
1492 }
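// Checkpointing is time-throttled: once ShouldCheckpoint() fires, the next
// allowed checkpoint time is pushed forward by |update_checkpoint_wait_|, so
// a burst of operations finishing within that window produces a single
// checkpoint. Forced checkpoints (CheckpointUpdateProgress(true)) bypass this
// throttle.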
1493 
1494 bool DeltaPerformer::CheckpointUpdateProgress(bool force) {
1495   if (!force && !ShouldCheckpoint()) {
1496     return false;
1497   }
1498   Terminator::set_exit_blocked(true);
1499   LOG_IF(WARNING, !prefs_->StartTransaction())
1500       << "unable to start transaction in checkpointing";
1501   DEFER {
1502     prefs_->CancelTransaction();
1503   };
1504   if (last_updated_operation_num_ != next_operation_num_ || force) {
1505     if (!signatures_message_data_.empty()) {
1506       // Save the signature blob because if the update is interrupted after the
1507       // download phase we don't go through this path anymore. Some alternatives
1508       // to consider:
1509       //
1510       // 1. On resume, re-download the signature blob from the server and
1511       // re-verify it.
1512       //
1513       // 2. Verify the signature as soon as it's received and don't checkpoint
1514       // the blob and the signed sha-256 context.
1515       LOG_IF(WARNING,
1516              !prefs_->SetString(kPrefsUpdateStateSignatureBlob,
1517                                 signatures_message_data_))
1518           << "Unable to store the signature blob.";
1519     }
1520     TEST_AND_RETURN_FALSE(prefs_->SetString(
1521         kPrefsUpdateStateSHA256Context, payload_hash_calculator_.GetContext()));
1522     TEST_AND_RETURN_FALSE(
1523         prefs_->SetString(kPrefsUpdateStateSignedSHA256Context,
1524                           signed_hash_calculator_.GetContext()));
1525     TEST_AND_RETURN_FALSE(
1526         prefs_->SetInt64(kPrefsUpdateStateNextDataOffset, buffer_offset_));
1527     last_updated_operation_num_ = next_operation_num_;
1528 
1529     if (next_operation_num_ < num_total_operations_) {
1530       size_t partition_index = current_partition_;
1531       while (next_operation_num_ >= acc_num_operations_[partition_index]) {
1532         partition_index++;
1533       }
1534       const size_t partition_operation_num =
1535           next_operation_num_ -
1536           (partition_index ? acc_num_operations_[partition_index - 1] : 0);
1537       const InstallOperation& op =
1538           partitions_[partition_index].operations(partition_operation_num);
1539       TEST_AND_RETURN_FALSE(
1540           prefs_->SetInt64(kPrefsUpdateStateNextDataLength, op.data_length()));
1541     } else {
1542       TEST_AND_RETURN_FALSE(
1543           prefs_->SetInt64(kPrefsUpdateStateNextDataLength, 0));
1544     }
1545     if (partition_writer_) {
1546       partition_writer_->CheckpointUpdateProgress(GetPartitionOperationNum());
1547     } else {
1548       CHECK_EQ(next_operation_num_, num_total_operations_)
1549           << "Partition writer is null, we are expected to finish all "
1550              "operations: "
1551           << next_operation_num_ << "/" << num_total_operations_;
1552     }
1553   }
1554   TEST_AND_RETURN_FALSE(
1555       prefs_->SetInt64(kPrefsUpdateStateNextOperation, next_operation_num_));
1556   if (!prefs_->SubmitTransaction()) {
1557     LOG(ERROR) << "Failed to submit transaction in checkpointing";
1558   }
1559   return true;
1560 }
1561 
1562 bool DeltaPerformer::PrimeUpdateState() {
1563   CHECK(manifest_valid_);
1564 
1565   int64_t next_operation = kUpdateStateOperationInvalid;
1566   if (!prefs_->GetInt64(kPrefsUpdateStateNextOperation, &next_operation) ||
1567       next_operation == kUpdateStateOperationInvalid || next_operation <= 0) {
1568     // Initiating a new update, no more state needs to be initialized.
1569     return true;
1570   }
1571   next_operation_num_ = next_operation;
1572 
1573   // Resuming an update -- load the rest of the update state.
1574   int64_t next_data_offset = -1;
1575   TEST_AND_RETURN_FALSE(
1576       prefs_->GetInt64(kPrefsUpdateStateNextDataOffset, &next_data_offset) &&
1577       next_data_offset >= 0);
1578   buffer_offset_ = next_data_offset;
1579 
1580   // The signed hash context and the signature blob may be empty if the
1581   // interrupted update didn't reach the signature.
1582   string signed_hash_context;
1583   if (prefs_->GetString(kPrefsUpdateStateSignedSHA256Context,
1584                         &signed_hash_context)) {
1585     TEST_AND_RETURN_FALSE(
1586         signed_hash_calculator_.SetContext(signed_hash_context));
1587   }
1588 
1589   prefs_->GetString(kPrefsUpdateStateSignatureBlob, &signatures_message_data_);
1590 
1591   string hash_context;
1592   TEST_AND_RETURN_FALSE(
1593       prefs_->GetString(kPrefsUpdateStateSHA256Context, &hash_context) &&
1594       payload_hash_calculator_.SetContext(hash_context));
1595 
1596   int64_t manifest_metadata_size = 0;
1597   TEST_AND_RETURN_FALSE(
1598       prefs_->GetInt64(kPrefsManifestMetadataSize, &manifest_metadata_size) &&
1599       manifest_metadata_size > 0);
1600   metadata_size_ = manifest_metadata_size;
1601 
1602   int64_t manifest_signature_size = 0;
1603   TEST_AND_RETURN_FALSE(
1604       prefs_->GetInt64(kPrefsManifestSignatureSize, &manifest_signature_size) &&
1605       manifest_signature_size >= 0);
1606   metadata_signature_size_ = manifest_signature_size;
1607 
1608   // Advance the download progress to reflect what doesn't need to be
1609   // re-downloaded.
1610   total_bytes_received_ += buffer_offset_;
1611 
1612   // Speculatively count the resume as a failure.
1613   int64_t resumed_update_failures{};
1614   if (prefs_->GetInt64(kPrefsResumedUpdateFailures, &resumed_update_failures)) {
1615     resumed_update_failures++;
1616   } else {
1617     resumed_update_failures = 1;
1618   }
1619   prefs_->SetInt64(kPrefsResumedUpdateFailures, resumed_update_failures);
1620   return true;
1621 }
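// The prefs consumed here mirror the ones written by
// CheckpointUpdateProgress(), roughly:
//   kPrefsUpdateStateNextOperation       -> next_operation_num_
//   kPrefsUpdateStateNextDataOffset      -> buffer_offset_
//   kPrefsUpdateStateSHA256Context       -> payload_hash_calculator_
//   kPrefsUpdateStateSignedSHA256Context -> signed_hash_calculator_
//   kPrefsUpdateStateSignatureBlob       -> signatures_message_data_
//   kPrefsManifestMetadataSize           -> metadata_size_
//   kPrefsManifestSignatureSize          -> metadata_signature_size_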
1622 
1623 bool DeltaPerformer::IsDynamicPartition(const std::string& part_name,
1624                                         uint32_t slot) {
1625   return boot_control_->GetDynamicPartitionControl()->IsDynamicPartition(
1626       part_name, slot);
1627 }
1628 
1629 std::unique_ptr<PartitionWriterInterface> DeltaPerformer::CreatePartitionWriter(
1630     const PartitionUpdate& partition_update,
1631     const InstallPlan::Partition& install_part,
1632     DynamicPartitionControlInterface* dynamic_control,
1633     size_t block_size,
1634     bool is_interactive,
1635     bool is_dynamic_partition) {
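  // Note: |block_size|, |is_interactive| and |is_dynamic_partition| are not
  // used below; the member fields block_size_ and interactive_ are passed
  // instead, and the dynamic-partition flag is recomputed from |install_part|
  // and the install plan's target slot.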
1636   return partition_writer::CreatePartitionWriter(
1637       partition_update,
1638       install_part,
1639       dynamic_control,
1640       block_size_,
1641       interactive_,
1642       IsDynamicPartition(install_part.name, install_plan_->target_slot));
1643 }
1644 
1645 }  // namespace chromeos_update_engine
1646