1 //
2 // Copyright (C) 2012 The Android Open Source Project
3 //
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
7 //
8 //      http://www.apache.org/licenses/LICENSE-2.0
9 //
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
15 //
16 
17 #include "update_engine/payload_consumer/delta_performer.h"
18 
19 #include <errno.h>
20 #include <linux/fs.h>
21 
22 #include <algorithm>
23 #include <cstring>
24 #include <map>
25 #include <memory>
26 #include <set>
27 #include <string>
28 #include <utility>
29 #include <vector>
30 
31 #include <android-base/properties.h>
32 #include <android-base/strings.h>
33 #include <base/files/file_util.h>
34 #include <base/format_macros.h>
35 #include <base/metrics/histogram_macros.h>
36 #include <base/strings/string_number_conversions.h>
37 #include <base/strings/stringprintf.h>
38 #include <base/time/time.h>
39 #include <brillo/data_encoding.h>
40 #include <bsdiff/bspatch.h>
41 #include <google/protobuf/repeated_field.h>
42 #include <puffin/puffpatch.h>
43 
44 #include "libsnapshot/cow_format.h"
45 #include "update_engine/common/constants.h"
46 #include "update_engine/common/download_action.h"
47 #include "update_engine/common/error_code.h"
48 #include "update_engine/common/error_code_utils.h"
49 #include "update_engine/common/hardware_interface.h"
50 #include "update_engine/common/prefs_interface.h"
51 #include "update_engine/common/terminator.h"
52 #include "update_engine/common/utils.h"
53 #include "update_engine/payload_consumer/partition_update_generator_interface.h"
54 #include "update_engine/payload_consumer/partition_writer.h"
55 #include "update_engine/update_metadata.pb.h"
56 #if USE_FEC
57 #include "update_engine/payload_consumer/fec_file_descriptor.h"
58 #endif  // USE_FEC
59 #include "update_engine/payload_consumer/payload_constants.h"
60 #include "update_engine/payload_consumer/payload_verifier.h"
61 
62 using google::protobuf::RepeatedPtrField;
63 using std::min;
64 using std::string;
65 using std::vector;
66 
67 namespace chromeos_update_engine {
68 const unsigned DeltaPerformer::kProgressLogMaxChunks = 10;
69 const unsigned DeltaPerformer::kProgressLogTimeoutSeconds = 30;
70 const unsigned DeltaPerformer::kProgressDownloadWeight = 50;
71 const unsigned DeltaPerformer::kProgressOperationsWeight = 50;
72 const uint64_t DeltaPerformer::kCheckpointFrequencySeconds = 1;
73 
74 namespace {
75 const int kUpdateStateOperationInvalid = -1;
76 const int kMaxResumedUpdateFailures = 10;
77 
78 }  // namespace
79 
80 // Computes the ratio of |part| and |total|, scaled to |norm|, using integer
81 // arithmetic.
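// For example, IntRatio(30, 120, 100) == 25; the integer division truncates,
// so the result never rounds up.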
82 static uint64_t IntRatio(uint64_t part, uint64_t total, uint64_t norm) {
83   return part * norm / total;
84 }
85 
86 void DeltaPerformer::LogProgress(const char* message_prefix) {
87   // Format operations total count and percentage.
88   string total_operations_str("?");
89   string completed_percentage_str("");
90   if (num_total_operations_) {
91     total_operations_str = std::to_string(num_total_operations_);
92     // Upcasting to 64-bit to avoid overflow, back to size_t for formatting.
93     completed_percentage_str = base::StringPrintf(
94         " (%" PRIu64 "%%)",
95         IntRatio(next_operation_num_, num_total_operations_, 100));
96   }
97 
98   // Format download total count and percentage.
99   size_t payload_size = payload_->size;
100   string payload_size_str("?");
101   string downloaded_percentage_str("");
102   if (payload_size) {
103     payload_size_str = std::to_string(payload_size);
104     // Upcasting to 64-bit to avoid overflow, back to size_t for formatting.
105     downloaded_percentage_str = base::StringPrintf(
106         " (%" PRIu64 "%%)", IntRatio(total_bytes_received_, payload_size, 100));
107   }
108 
109   LOG(INFO) << (message_prefix ? message_prefix : "") << next_operation_num_
110             << "/" << total_operations_str << " operations"
111             << completed_percentage_str << ", " << total_bytes_received_ << "/"
112             << payload_size_str << " bytes downloaded"
113             << downloaded_percentage_str << ", overall progress "
114             << overall_progress_ << "%";
115 }
116 
117 void DeltaPerformer::UpdateOverallProgress(bool force_log,
118                                            const char* message_prefix) {
119   // Compute our download and overall progress.
120   unsigned new_overall_progress = 0;
121   static_assert(kProgressDownloadWeight + kProgressOperationsWeight == 100,
122                 "Progress weights don't add up");
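  // Example with the default 50/50 weights: 30% of the payload bytes
  // downloaded and 10% of the operations applied yields 15 + 5 = 20% overall.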
123   // Only consider download progress if its total size is known; otherwise
124   // adjust the operations weight to compensate for the absence of download
125   // progress. Also, make sure to cap the download portion at
126   // kProgressDownloadWeight, in case we end up downloading more than we
127   // initially expected (this indicates a problem, but could generally happen).
128   // TODO(garnold) the correction of operations weight when we do not have the
129   // total payload size, as well as the conditional guard below, should both be
130   // eliminated once we ensure that the payload_size in the install plan is
131   // always given and is non-zero. This currently isn't the case during unit
132   // tests (see chromium-os:37969).
133   size_t payload_size = payload_->size;
134   unsigned actual_operations_weight = kProgressOperationsWeight;
135   if (payload_size)
136     new_overall_progress +=
137         min(static_cast<unsigned>(IntRatio(
138                 total_bytes_received_, payload_size, kProgressDownloadWeight)),
139             kProgressDownloadWeight);
140   else
141     actual_operations_weight += kProgressDownloadWeight;
142 
143   // Only add completed operations if their total number is known; we definitely
144   // expect an update to have at least one operation, so the expectation is that
145   // this will eventually reach |actual_operations_weight|.
146   if (num_total_operations_)
147     new_overall_progress += IntRatio(
148         next_operation_num_, num_total_operations_, actual_operations_weight);
149 
150   // Progress ratio cannot recede, unless our assumptions about the total
151   // payload size, total number of operations, or the monotonicity of progress
152   // are breached.
153   if (new_overall_progress < overall_progress_) {
154     LOG(WARNING) << "progress counter receded from " << overall_progress_
155                  << "% down to " << new_overall_progress << "%; this is a bug";
156     force_log = true;
157   }
158   overall_progress_ = new_overall_progress;
159 
160   // Update chunk index, log as needed: if forced by the caller, or we
161   // completed a progress chunk, or a timeout has expired.
162   base::TimeTicks curr_time = base::TimeTicks::Now();
163   unsigned curr_progress_chunk =
164       overall_progress_ * kProgressLogMaxChunks / 100;
165   if (force_log || curr_progress_chunk > last_progress_chunk_ ||
166       curr_time > forced_progress_log_time_) {
167     forced_progress_log_time_ = curr_time + forced_progress_log_wait_;
168     LogProgress(message_prefix);
169   }
170   last_progress_chunk_ = curr_progress_chunk;
171 }
172 
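// Copies at most |max| - |buffer_.size()| of the |*count_p| bytes pointed to
// by |*bytes_p| into |buffer_|, advancing |*bytes_p| and decrementing
// |*count_p| by the amount consumed; returns the number of bytes copied.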
173 size_t DeltaPerformer::CopyDataToBuffer(const char** bytes_p,
174                                         size_t* count_p,
175                                         size_t max) {
176   const size_t count = *count_p;
177   if (!count)
178     return 0;  // Special case shortcut.
179   size_t read_len = min(count, max - buffer_.size());
180   const char* bytes_start = *bytes_p;
181   const char* bytes_end = bytes_start + read_len;
182   buffer_.reserve(max);
183   buffer_.insert(buffer_.end(), bytes_start, bytes_end);
184   *bytes_p = bytes_end;
185   *count_p = count - read_len;
186   return read_len;
187 }
188 
189 bool DeltaPerformer::HandleOpResult(bool op_result,
190                                     const char* op_type_name,
191                                     ErrorCode* error) {
192   if (op_result)
193     return true;
194 
195   LOG(ERROR) << "Failed to perform " << op_type_name << " operation "
196              << next_operation_num_ << ", which is the operation "
197              << GetPartitionOperationNum() << " in partition \""
198              << partitions_[current_partition_].partition_name() << "\"";
199   if (*error == ErrorCode::kSuccess)
200     *error = ErrorCode::kDownloadOperationExecutionError;
201   return false;
202 }
203 
204 int DeltaPerformer::Close() {
205   // Checkpoint update progress before canceling, so that subsequent attempts
206   // can resume from exactly where update_engine left off last time.
207   CheckpointUpdateProgress(true);
208   int err = -CloseCurrentPartition();
209   LOG_IF(ERROR,
210          !payload_hash_calculator_.Finalize() ||
211              !signed_hash_calculator_.Finalize())
212       << "Unable to finalize the hash.";
213   if (!buffer_.empty()) {
214     LOG(INFO) << "Discarding " << buffer_.size() << " unused downloaded bytes";
215     if (err >= 0)
216       err = 1;
217   }
218   return -err;
219 }
220 
221 int DeltaPerformer::CloseCurrentPartition() {
222   if (!partition_writer_) {
223     return 0;
224   }
225   int err = partition_writer_->Close();
226   partition_writer_ = nullptr;
227   return err;
228 }
229 
230 bool DeltaPerformer::OpenCurrentPartition() {
231   if (current_partition_ >= partitions_.size())
232     return false;
233 
234   const PartitionUpdate& partition = partitions_[current_partition_];
235   size_t num_previous_partitions =
236       install_plan_->partitions.size() - partitions_.size();
237   const InstallPlan::Partition& install_part =
238       install_plan_->partitions[num_previous_partitions + current_partition_];
239   auto dynamic_control = boot_control_->GetDynamicPartitionControl();
240   partition_writer_ = CreatePartitionWriter(
241       partition,
242       install_part,
243       dynamic_control,
244       block_size_,
245       interactive_,
246       IsDynamicPartition(install_part.name, install_plan_->target_slot));
247   // Open source fds if we have a delta payload, or for partitions in the
248   // partial update.
249   const bool source_may_exist = manifest_.partial_update() ||
250                                 payload_->type == InstallPayloadType::kDelta;
251   const size_t partition_operation_num = GetPartitionOperationNum();
252 
253   TEST_AND_RETURN_FALSE(partition_writer_->Init(
254       install_plan_, source_may_exist, partition_operation_num));
255   CheckpointUpdateProgress(true);
256   return true;
257 }
258 
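// Returns the index of the next operation relative to the start of the
// current partition, i.e. |next_operation_num_| minus the operations
// accumulated by all preceding partitions in |acc_num_operations_|.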
259 size_t DeltaPerformer::GetPartitionOperationNum() {
260   return next_operation_num_ -
261          (current_partition_ ? acc_num_operations_[current_partition_ - 1] : 0);
262 }
263 
264 namespace {
265 
266 void LogPartitionInfoHash(const PartitionInfo& info, const string& tag) {
267   string sha256 = HexEncode(info.hash());
268   LOG(INFO) << "PartitionInfo " << tag << " sha256: " << sha256
269             << " size: " << info.size();
270 }
271 
272 void LogPartitionInfo(const vector<PartitionUpdate>& partitions) {
273   for (const PartitionUpdate& partition : partitions) {
274     if (partition.has_old_partition_info()) {
275       LogPartitionInfoHash(partition.old_partition_info(),
276                            "old " + partition.partition_name());
277     }
278     LogPartitionInfoHash(partition.new_partition_info(),
279                          "new " + partition.partition_name());
280   }
281 }
282 
283 }  // namespace
284 
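// The payload header is considered parsed once ParsePayloadHeader has filled
// in |metadata_size_| with a non-zero value.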
285 bool DeltaPerformer::IsHeaderParsed() const {
286   return metadata_size_ != 0;
287 }
288 
289 MetadataParseResult DeltaPerformer::ParsePayloadMetadata(
290     const brillo::Blob& payload, ErrorCode* error) {
291   *error = ErrorCode::kSuccess;
292 
293   if (!IsHeaderParsed()) {
294     MetadataParseResult result =
295         payload_metadata_.ParsePayloadHeader(payload, error);
296     if (result != MetadataParseResult::kSuccess)
297       return result;
298 
299     metadata_size_ = payload_metadata_.GetMetadataSize();
300     metadata_signature_size_ = payload_metadata_.GetMetadataSignatureSize();
301     major_payload_version_ = payload_metadata_.GetMajorVersion();
302 
303     // If the metadata size is present in the install plan, check for it
304     // immediately, even before waiting for that many bytes to be downloaded
305     // in the payload. This will prevent any attack which relies on us
306     // downloading data beyond the expected metadata size.
307     if (install_plan_->hash_checks_mandatory) {
308       if (payload_->metadata_size != metadata_size_) {
309         LOG(ERROR) << "Mandatory metadata size in Omaha response ("
310                    << payload_->metadata_size
311                    << ") is missing/incorrect, actual = " << metadata_size_;
312         *error = ErrorCode::kDownloadInvalidMetadataSize;
313         return MetadataParseResult::kError;
314       }
315     }
316 
317     // Check that |metadata_signature_size_| and |metadata_size_| are not
318     // very big numbers. This is necessary since |update_engine| needs to write
319     // these values into the buffer before being able to use them, and if an
320     // attacker sets these values to a very big number, the buffer will overflow
321     // and |update_engine| will crash. A simple way of solving this is to check
322     // that the size of both values is smaller than the payload itself.
323     if (metadata_size_ + metadata_signature_size_ > payload_->size) {
324       LOG(ERROR) << "The size of the metadata_size(" << metadata_size_ << ")"
325                  << " or metadata signature(" << metadata_signature_size_ << ")"
326                  << " is greater than the size of the payload"
327                  << "(" << payload_->size << ")";
328       *error = ErrorCode::kDownloadInvalidMetadataSize;
329       return MetadataParseResult::kError;
330     }
331   }
332 
333   // Now that we have validated the metadata size, wait for the full metadata
334   // and its signature (if it exists) to be read in before we parse it.
335   if (payload.size() < metadata_size_ + metadata_signature_size_)
336     return MetadataParseResult::kInsufficientData;
337 
338   // Log whether we validated the size or are simply trusting what's in the
339   // payload. This is logged here (after we received the full metadata) so
340   // that we just log once (instead of logging n times) if it takes n
341   // DeltaPerformer::Write calls to download the full manifest.
342   if (payload_->metadata_size == metadata_size_) {
343     LOG(INFO) << "Manifest size in payload matches expected value from Omaha";
344   } else {
345     // For mandatory cases, we'd have already returned a kMetadataParseError
346     // above. We'll be here only for non-mandatory cases. Just send a UMA stat.
347     LOG(WARNING) << "Ignoring missing/incorrect metadata size ("
348                  << payload_->metadata_size
349                  << ") in Omaha response as validation is not mandatory. "
350                  << "Trusting metadata size in payload = " << metadata_size_;
351   }
352 
353   // NOLINTNEXTLINE(whitespace/braces)
354   auto [payload_verifier, perform_verification] = CreatePayloadVerifier();
355   if (!payload_verifier) {
356     LOG(ERROR) << "Failed to create payload verifier.";
357     *error = ErrorCode::kDownloadMetadataSignatureVerificationError;
358     if (perform_verification) {
359       return MetadataParseResult::kError;
360     }
361   } else {
362     // We have the full metadata in |payload|. Verify its integrity
363     // and authenticity based on the information we have in Omaha response.
364     *error = payload_metadata_.ValidateMetadataSignature(
365         payload, payload_->metadata_signature, *payload_verifier);
366   }
367   if (*error != ErrorCode::kSuccess) {
368     if (install_plan_->hash_checks_mandatory) {
369       // The autoupdate_CatchBadSignatures test checks for this string
370       // in log-files. Keep in sync.
371       LOG(ERROR) << "Mandatory metadata signature validation failed";
372       return MetadataParseResult::kError;
373     }
374 
375     // For non-mandatory cases, just send a UMA stat.
376     LOG(WARNING) << "Ignoring metadata signature validation failures";
377     *error = ErrorCode::kSuccess;
378   }
379 
380   // The payload metadata is deemed valid, it's safe to parse the protobuf.
381   if (!payload_metadata_.GetManifest(payload, &manifest_)) {
382     LOG(ERROR) << "Unable to parse manifest in update file.";
383     *error = ErrorCode::kDownloadManifestParseError;
384     return MetadataParseResult::kError;
385   }
386 
387   manifest_parsed_ = true;
388   return MetadataParseResult::kSuccess;
389 }
390 
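// Records a local histogram of the elapsed time for one install operation
// type, bucketed from 10 ms to 5 minutes over 20 buckets.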
391 #define OP_DURATION_HISTOGRAM(_op_name, _start_time)                        \
392   LOCAL_HISTOGRAM_CUSTOM_TIMES(                                             \
393       "UpdateEngine.DownloadAction.InstallOperation::" + string(_op_name) + \
394           ".Duration",                                                      \
395       (base::TimeTicks::Now() - _start_time),                               \
396       base::TimeDelta::FromMilliseconds(10),                                \
397       base::TimeDelta::FromMinutes(5),                                      \
398       20);
399 
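// Checks whether this OTA downgrades the security patch level (SPL). The OTA
// is rejected only when the target SPL is older than the current build's SPL
// and ro.boot.verifiedbootstate is "green" (likely a locked bootloader);
// otherwise an SPL downgrade is allowed but forces a data wipe by setting
// |powerwash_required|.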
400 bool DeltaPerformer::CheckSPLDowngrade() {
401   if (!manifest_.has_security_patch_level()) {
402     return true;
403   }
404   if (manifest_.security_patch_level().empty()) {
405     return true;
406   }
407   const auto new_spl = manifest_.security_patch_level();
408   const auto current_spl =
409       android::base::GetProperty("ro.build.version.security_patch", "");
410   if (current_spl.empty()) {
411     LOG(WARNING) << "Failed to get ro.build.version.security_patch, unable to "
412                     "determine if this OTA is an SPL downgrade. Assuming this "
413                     "OTA is not an SPL downgrade.";
414     return true;
415   }
416   if (new_spl < current_spl) {
417     const auto avb_state =
418         android::base::GetProperty("ro.boot.verifiedbootstate", "green");
419     if (android::base::EqualsIgnoreCase(avb_state, "green")) {
420       LOG(ERROR) << "Target build SPL " << new_spl
421                  << " is older than current build's SPL " << current_spl
422                  << ", this OTA is an SPL downgrade. Your device's "
423                     "ro.boot.verifiedbootstate="
424                  << avb_state
425                  << ", it probably has a locked bootloader. Since a locked "
426                     "bootloader will reject SPL downgrade no matter what, we "
427                     "will reject this OTA.";
428       return false;
429     }
430     install_plan_->powerwash_required = true;
431     LOG(WARNING)
432         << "Target build SPL " << new_spl
433         << " is older than current build's SPL " << current_spl
434         << ", this OTA is an SPL downgrade. Data wipe will be required";
435   }
436   return true;
437 }
438 
439 // Wrapper around write. Returns true if all requested bytes
440 // were written, or false on any error, regardless of progress,
441 // and stores an action exit code in |error|.
442 bool DeltaPerformer::Write(const void* bytes, size_t count, ErrorCode* error) {
443   *error = ErrorCode::kSuccess;
444   const char* c_bytes = reinterpret_cast<const char*>(bytes);
445 
446   // Update the total byte downloaded count and the progress logs.
447   total_bytes_received_ += count;
448   UpdateOverallProgress(false, "Completed ");
449 
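  // First stage: buffer incoming bytes until the payload header, the metadata
  // and its signature are fully available, then validate them and parse the
  // manifest before applying any operation.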
450   while (!manifest_valid_) {
451     // Read data up to the needed limit; this is either the maximum payload
452     // header size, or the full metadata size (once it becomes known).
453     const bool do_read_header = !IsHeaderParsed();
454     CopyDataToBuffer(
455         &c_bytes,
456         &count,
457         (do_read_header ? kMaxPayloadHeaderSize
458                         : metadata_size_ + metadata_signature_size_));
459 
460     MetadataParseResult result = ParsePayloadMetadata(buffer_, error);
461     if (result == MetadataParseResult::kError)
462       return false;
463     if (result == MetadataParseResult::kInsufficientData) {
464       // If we just processed the header, make an attempt on the manifest.
465       if (do_read_header && IsHeaderParsed())
466         continue;
467 
468       return true;
469     }
470 
471     // Checks the integrity of the payload manifest.
472     if ((*error = ValidateManifest()) != ErrorCode::kSuccess)
473       return false;
474     manifest_valid_ = true;
475     if (!install_plan_->is_resume) {
476       auto begin = reinterpret_cast<const char*>(buffer_.data());
477       prefs_->SetString(kPrefsManifestBytes, {begin, buffer_.size()});
478     }
479 
480     // Clear the download buffer.
481     DiscardBuffer(false, metadata_size_);
482 
483     block_size_ = manifest_.block_size();
484 
485     if (!CheckSPLDowngrade()) {
486       *error = ErrorCode::kPayloadTimestampError;
487       return false;
488     }
489 
490     // Update estimate_cow_size if VABC is disabled:
491     // new_cow_size per partition = partition_size - (#blocks in COPY
492     // operations that are part of the partition)
493     if (install_plan_->vabc_none) {
494       LOG(INFO) << "Setting Virtual AB Compression algorithm to none";
495       manifest_.mutable_dynamic_partition_metadata()
496           ->set_vabc_compression_param("none");
497       for (auto& partition : *manifest_.mutable_partitions()) {
498         auto new_cow_size = partition.new_partition_info().size();
499         for (const auto& operation : partition.merge_operations()) {
500           if (operation.type() == CowMergeOperation::COW_COPY) {
501             new_cow_size -=
502                 operation.dst_extent().num_blocks() * manifest_.block_size();
503           }
504         }
505         // Every block written to COW device will come with a header which
506         // stores src/dst block info along with other data.
507         const auto cow_metadata_size = partition.new_partition_info().size() /
508                                        manifest_.block_size() *
509                                        sizeof(android::snapshot::CowOperation);
510         // update_engine emits a label op after every install op or every two
511         // seconds, whichever is longer. In the worst case we add 1 label per
512         // InstallOp, so take the size of label ops into account.
513         const auto label_ops_size = partition.operations_size() *
514                                     sizeof(android::snapshot::CowOperation);
515         // Add an extra 2MB of headroom for any unexpected space usage. If we
516         // overrun the reserved COW size, the entire OTA will fail and there
517         // is no way for the user to retry it.
518         partition.set_estimate_cow_size(new_cow_size + (1024 * 1024 * 2) +
519                                         cow_metadata_size + label_ops_size);
520         LOG(INFO) << "New COW size for partition " << partition.partition_name()
521                   << " is " << partition.estimate_cow_size();
522       }
523     }
524     if (install_plan_->disable_vabc) {
525       manifest_.mutable_dynamic_partition_metadata()->set_vabc_enabled(false);
526     }
527     if (install_plan_->enable_threading) {
528       manifest_.mutable_dynamic_partition_metadata()
529           ->mutable_vabc_feature_set()
530           ->set_threaded(true);
531       LOG(INFO) << "Attempting to enable multi-threaded compression for VABC";
532     }
533     if (install_plan_->batched_writes) {
534       manifest_.mutable_dynamic_partition_metadata()
535           ->mutable_vabc_feature_set()
536           ->set_batch_writes(true);
537       LOG(INFO) << "Attempting to enable batched writes for VABC";
538     }
539 
540     // This populates |partitions_| and the |install_plan.partitions| with the
541     // list of partitions from the manifest.
542     if (!ParseManifestPartitions(error))
543       return false;
544 
545     // |install_plan.partitions| was filled in; nothing needs to be done here
546     // if the payload was already applied. Return false to terminate the HTTP
547     // fetcher, but keep |error| as ErrorCode::kSuccess.
548     if (payload_->already_applied)
549       return false;
550 
551     num_total_operations_ = 0;
552     for (const auto& partition : partitions_) {
553       num_total_operations_ += partition.operations_size();
554       acc_num_operations_.push_back(num_total_operations_);
555     }
556 
557     LOG_IF(WARNING,
558            !prefs_->SetInt64(kPrefsManifestMetadataSize, metadata_size_))
559         << "Unable to save the manifest metadata size.";
560     LOG_IF(WARNING,
561            !prefs_->SetInt64(kPrefsManifestSignatureSize,
562                              metadata_signature_size_))
563         << "Unable to save the manifest signature size.";
564 
565     if (!PrimeUpdateState()) {
566       *error = ErrorCode::kDownloadStateInitializationError;
567       LOG(ERROR) << "Unable to prime the update state.";
568       return false;
569     }
570 
571     if (next_operation_num_ < acc_num_operations_[current_partition_]) {
572       if (!OpenCurrentPartition()) {
573         *error = ErrorCode::kInstallDeviceOpenError;
574         return false;
575       }
576     }
577 
578     if (next_operation_num_ > 0)
579       UpdateOverallProgress(true, "Resuming after ");
580     LOG(INFO) << "Starting to apply update payload operations";
581   }
582 
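  // Second stage: apply install operations in order, opening and closing
  // partition writers at partition boundaries and checkpointing progress
  // after each operation.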
583   while (next_operation_num_ < num_total_operations_) {
584     // Check if we should cancel the current attempt for any reason.
585     // In this case, *error will have already been populated with the reason
586     // why we're canceling.
587     if (download_delegate_ && download_delegate_->ShouldCancel(error))
588       return false;
589 
590     // We know there are more operations to perform because we didn't reach the
591     // |num_total_operations_| limit yet.
592     if (next_operation_num_ >= acc_num_operations_[current_partition_]) {
593       if (partition_writer_) {
594         if (!partition_writer_->FinishedInstallOps()) {
595           *error = ErrorCode::kDownloadWriteError;
596           return false;
597         }
598       }
599       CloseCurrentPartition();
600       // Skip until there are operations for current_partition_.
601       while (next_operation_num_ >= acc_num_operations_[current_partition_]) {
602         current_partition_++;
603       }
604       if (!OpenCurrentPartition()) {
605         *error = ErrorCode::kInstallDeviceOpenError;
606         return false;
607       }
608     }
609 
610     const InstallOperation& op =
611         partitions_[current_partition_].operations(GetPartitionOperationNum());
612 
613     CopyDataToBuffer(&c_bytes, &count, op.data_length());
614 
615     // Check whether we received all of the next operation's data payload.
616     if (!CanPerformInstallOperation(op))
617       return true;
618 
619     // Validate the operation unconditionally. This helps prevent the
620     // exploitation of vulnerabilities in the patching libraries, e.g. bspatch.
621     // The hash of the patch data for a given operation is embedded in the
622     // payload metadata; and thus has been verified against the public key on
623     // device.
624     // Note: Validate must be called only after CanPerformInstallOperation has
625     // succeeded. Otherwise, we might fail operations simply because there
626     // isn't yet sufficient data to compute the proper hash.
627     *error = ValidateOperationHash(op);
628     if (*error != ErrorCode::kSuccess) {
629       if (install_plan_->hash_checks_mandatory) {
630         LOG(ERROR) << "Mandatory operation hash check failed";
631         return false;
632       }
633 
634       // For non-mandatory cases, just send a UMA stat.
635       LOG(WARNING) << "Ignoring operation validation errors";
636       *error = ErrorCode::kSuccess;
637     }
638 
639     // Makes sure we unblock exit when this operation completes.
640     ScopedTerminatorExitUnblocker exit_unblocker =
641         ScopedTerminatorExitUnblocker();  // Avoids a compiler unused var bug.
642 
643     base::TimeTicks op_start_time = base::TimeTicks::Now();
644 
645     bool op_result{};
646     const string op_name = InstallOperationTypeName(op.type());
647     switch (op.type()) {
648       case InstallOperation::REPLACE:
649       case InstallOperation::REPLACE_BZ:
650       case InstallOperation::REPLACE_XZ:
651         op_result = PerformReplaceOperation(op);
652         OP_DURATION_HISTOGRAM("REPLACE", op_start_time);
653         break;
654       case InstallOperation::ZERO:
655       case InstallOperation::DISCARD:
656         op_result = PerformZeroOrDiscardOperation(op);
657         OP_DURATION_HISTOGRAM("ZERO_OR_DISCARD", op_start_time);
658         break;
659       case InstallOperation::SOURCE_COPY:
660         op_result = PerformSourceCopyOperation(op, error);
661         OP_DURATION_HISTOGRAM("SOURCE_COPY", op_start_time);
662         break;
663       case InstallOperation::SOURCE_BSDIFF:
664       case InstallOperation::BROTLI_BSDIFF:
665       case InstallOperation::PUFFDIFF:
666       case InstallOperation::ZUCCHINI:
667       case InstallOperation::LZ4DIFF_PUFFDIFF:
668       case InstallOperation::LZ4DIFF_BSDIFF:
669         op_result = PerformDiffOperation(op, error);
670         OP_DURATION_HISTOGRAM(op_name, op_start_time);
671         break;
672       default:
673         op_result = false;
674     }
675     if (!HandleOpResult(op_result, op_name.c_str(), error))
676       return false;
677 
678     next_operation_num_++;
679     UpdateOverallProgress(false, "Completed ");
680     CheckpointUpdateProgress(false);
681   }
682 
683   if (partition_writer_) {
684     TEST_AND_RETURN_FALSE(partition_writer_->FinishedInstallOps());
685   }
686   CloseCurrentPartition();
687 
688   // In major version 2, we don't add an unused operation to the payload.
689   // If we already extracted the signature we should skip this step.
690   if (manifest_.has_signatures_offset() && manifest_.has_signatures_size() &&
691       signatures_message_data_.empty()) {
692     if (manifest_.signatures_offset() != buffer_offset_) {
693       LOG(ERROR) << "Payload signatures offset points to blob offset "
694                  << manifest_.signatures_offset()
695                  << " but signatures are expected at offset " << buffer_offset_;
696       *error = ErrorCode::kDownloadPayloadVerificationError;
697       return false;
698     }
699     CopyDataToBuffer(&c_bytes, &count, manifest_.signatures_size());
700     // Needs more data to cover entire signature.
701     if (buffer_.size() < manifest_.signatures_size())
702       return true;
703     if (!ExtractSignatureMessage()) {
704       LOG(ERROR) << "Extract payload signature failed.";
705       *error = ErrorCode::kDownloadPayloadVerificationError;
706       return false;
707     }
708     DiscardBuffer(true, 0);
709     // Since we extracted the SignatureMessage we need to advance the
710     // checkpoint, otherwise we would reload the signature and try to extract
711     // it again.
712     // This is the last checkpoint for an update, force this checkpoint to be
713     // saved.
714     CheckpointUpdateProgress(true);
715   }
716 
717   return true;
718 }
719 
720 bool DeltaPerformer::IsManifestValid() {
721   return manifest_valid_;
722 }
723 
724 bool DeltaPerformer::ParseManifestPartitions(ErrorCode* error) {
725   partitions_.assign(manifest_.partitions().begin(),
726                      manifest_.partitions().end());
727 
728   // For VAB and partial updates, the partition preparation will copy the
729   // dynamic partitions metadata to the target metadata slot, and rename the
730   // slot suffix of the partitions in the metadata.
731   if (install_plan_->target_slot != BootControlInterface::kInvalidSlot) {
732     uint64_t required_size = 0;
733     if (!PreparePartitionsForUpdate(&required_size)) {
734       if (required_size > 0) {
735         *error = ErrorCode::kNotEnoughSpace;
736       } else {
737         *error = ErrorCode::kInstallDeviceOpenError;
738       }
739       return false;
740     }
741   }
742 
743   // Partitions in manifest are no longer needed after preparing partitions.
744   manifest_.clear_partitions();
745   // TODO(xunchang) TBD: allow partial update only on devices with dynamic
746   // partition.
747   if (manifest_.partial_update()) {
748     std::set<std::string> touched_partitions;
749     for (const auto& partition_update : partitions_) {
750       touched_partitions.insert(partition_update.partition_name());
751     }
752 
753     auto generator = partition_update_generator::Create(boot_control_,
754                                                         manifest_.block_size());
755     std::vector<PartitionUpdate> untouched_static_partitions;
756     TEST_AND_RETURN_FALSE(
757         generator->GenerateOperationsForPartitionsNotInPayload(
758             install_plan_->source_slot,
759             install_plan_->target_slot,
760             touched_partitions,
761             &untouched_static_partitions));
762     partitions_.insert(partitions_.end(),
763                        untouched_static_partitions.begin(),
764                        untouched_static_partitions.end());
765 
766     // Save the untouched dynamic partitions in install plan.
767     std::vector<std::string> dynamic_partitions;
768     if (!boot_control_->GetDynamicPartitionControl()
769              ->ListDynamicPartitionsForSlot(install_plan_->source_slot,
770                                             boot_control_->GetCurrentSlot(),
771                                             &dynamic_partitions)) {
772       LOG(ERROR) << "Failed to load dynamic partitions from slot "
773                  << install_plan_->source_slot;
774       return false;
775     }
776     install_plan_->untouched_dynamic_partitions.clear();
777     for (const auto& name : dynamic_partitions) {
778       if (touched_partitions.find(name) == touched_partitions.end()) {
779         install_plan_->untouched_dynamic_partitions.push_back(name);
780       }
781     }
782   }
783 
784   if (!install_plan_->ParsePartitions(
785           partitions_, boot_control_, block_size_, error)) {
786     return false;
787   }
788   auto&& has_verity = [](const auto& part) {
789     return part.fec_extent().num_blocks() > 0 ||
790            part.hash_tree_extent().num_blocks() > 0;
791   };
792   if (!std::any_of(partitions_.begin(), partitions_.end(), has_verity)) {
793     install_plan_->write_verity = false;
794   }
795 
796   LogPartitionInfo(partitions_);
797   return true;
798 }
799 
800 bool DeltaPerformer::PreparePartitionsForUpdate(uint64_t* required_size) {
801   // Call the static PreparePartitionsForUpdate with the hash from
802   // kPrefsUpdateCheckResponseHash to ensure the hash of the payload that space
803   // is preallocated for matches the hash of the payload being applied.
804   string update_check_response_hash;
805   ignore_result(prefs_->GetString(kPrefsUpdateCheckResponseHash,
806                                   &update_check_response_hash));
807   return PreparePartitionsForUpdate(prefs_,
808                                     boot_control_,
809                                     install_plan_->target_slot,
810                                     manifest_,
811                                     update_check_response_hash,
812                                     required_size);
813 }
814 
815 bool DeltaPerformer::PreparePartitionsForUpdate(
816     PrefsInterface* prefs,
817     BootControlInterface* boot_control,
818     BootControlInterface::Slot target_slot,
819     const DeltaArchiveManifest& manifest,
820     const std::string& update_check_response_hash,
821     uint64_t* required_size) {
822   string last_hash;
823   ignore_result(
824       prefs->GetString(kPrefsDynamicPartitionMetadataUpdated, &last_hash));
825 
826   bool is_resume = !update_check_response_hash.empty() &&
827                    last_hash == update_check_response_hash;
828 
829   if (is_resume) {
830     LOG(INFO) << "Using previously prepared partitions for update. hash = "
831               << last_hash;
832   } else {
833     LOG(INFO) << "Preparing partitions for new update. last hash = "
834               << last_hash << ", new hash = " << update_check_response_hash;
835     ResetUpdateProgress(prefs, false);
836   }
837 
838   if (!boot_control->GetDynamicPartitionControl()->PreparePartitionsForUpdate(
839           boot_control->GetCurrentSlot(),
840           target_slot,
841           manifest,
842           !is_resume /* should update */,
843           required_size)) {
844     LOG(ERROR) << "Unable to initialize partition metadata for slot "
845                << BootControlInterface::SlotName(target_slot);
846     return false;
847   }
848 
849   TEST_AND_RETURN_FALSE(prefs->SetString(kPrefsDynamicPartitionMetadataUpdated,
850                                          update_check_response_hash));
851   LOG(INFO) << "PreparePartitionsForUpdate done.";
852 
853   return true;
854 }
855 
856 bool DeltaPerformer::CanPerformInstallOperation(
857     const chromeos_update_engine::InstallOperation& operation) {
858   // If we don't have a data blob we can apply it right away.
859   if (!operation.has_data_offset() && !operation.has_data_length())
860     return true;
861 
862   // See if we have the entire data blob in the buffer
863   if (operation.data_offset() < buffer_offset_) {
864     LOG(ERROR) << "we threw away data it seems?";
865     return false;
866   }
867 
868   return (operation.data_offset() + operation.data_length() <=
869           buffer_offset_ + buffer_.size());
870 }
871 
872 bool DeltaPerformer::PerformReplaceOperation(
873     const InstallOperation& operation) {
874   CHECK(operation.type() == InstallOperation::REPLACE ||
875         operation.type() == InstallOperation::REPLACE_BZ ||
876         operation.type() == InstallOperation::REPLACE_XZ);
877 
878   // Since we delete data off the beginning of the buffer as we use it,
879   // the data we need should be exactly at the beginning of the buffer.
880   TEST_AND_RETURN_FALSE(buffer_.size() >= operation.data_length());
881 
882   TEST_AND_RETURN_FALSE(partition_writer_->PerformReplaceOperation(
883       operation, buffer_.data(), buffer_.size()));
884   // Update buffer
885   DiscardBuffer(true, buffer_.size());
886   return true;
887 }
888 
889 bool DeltaPerformer::PerformZeroOrDiscardOperation(
890     const InstallOperation& operation) {
891   CHECK(operation.type() == InstallOperation::DISCARD ||
892         operation.type() == InstallOperation::ZERO);
893 
894   // These operations have no blob.
895   TEST_AND_RETURN_FALSE(!operation.has_data_offset());
896   TEST_AND_RETURN_FALSE(!operation.has_data_length());
897 
898   return partition_writer_->PerformZeroOrDiscardOperation(operation);
899 }
900 
901 bool DeltaPerformer::PerformSourceCopyOperation(
902     const InstallOperation& operation, ErrorCode* error) {
903   if (operation.has_src_length())
904     TEST_AND_RETURN_FALSE(operation.src_length() % block_size_ == 0);
905   if (operation.has_dst_length())
906     TEST_AND_RETURN_FALSE(operation.dst_length() % block_size_ == 0);
907   return partition_writer_->PerformSourceCopyOperation(operation, error);
908 }
909 
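// Serializes extents as "offset:length" pairs in bytes. For example, with
// block_size 4096 and full_length 12288, extents {start_block: 1,
// num_blocks: 2} and {start_block: 8, num_blocks: 1} produce
// "4096:8192,32768:4096".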
910 bool DeltaPerformer::ExtentsToBsdiffPositionsString(
911     const RepeatedPtrField<Extent>& extents,
912     uint64_t block_size,
913     uint64_t full_length,
914     string* positions_string) {
915   string ret;
916   uint64_t length = 0;
917   for (const Extent& extent : extents) {
918     int64_t start = extent.start_block() * block_size;
919     uint64_t this_length =
920         min(full_length - length,
921             static_cast<uint64_t>(extent.num_blocks()) * block_size);
922     ret += base::StringPrintf("%" PRIi64 ":%" PRIu64 ",", start, this_length);
923     length += this_length;
924   }
925   TEST_AND_RETURN_FALSE(length == full_length);
926   if (!ret.empty())
927     ret.resize(ret.size() - 1);  // Strip trailing comma off
928   *positions_string = ret;
929   return true;
930 }
931 
932 bool DeltaPerformer::PerformDiffOperation(const InstallOperation& operation,
933                                           ErrorCode* error) {
934   // Since we delete data off the beginning of the buffer as we use it,
935   // the data we need should be exactly at the beginning of the buffer.
936   TEST_AND_RETURN_FALSE(buffer_offset_ == operation.data_offset());
937   TEST_AND_RETURN_FALSE(buffer_.size() >= operation.data_length());
938   if (operation.has_src_length())
939     TEST_AND_RETURN_FALSE(operation.src_length() % block_size_ == 0);
940   if (operation.has_dst_length())
941     TEST_AND_RETURN_FALSE(operation.dst_length() % block_size_ == 0);
942 
943   TEST_AND_RETURN_FALSE(partition_writer_->PerformDiffOperation(
944       operation, error, buffer_.data(), buffer_.size()));
945   DiscardBuffer(true, buffer_.size());
946   return true;
947 }
948 
949 bool DeltaPerformer::ExtractSignatureMessage() {
950   TEST_AND_RETURN_FALSE(signatures_message_data_.empty());
951   TEST_AND_RETURN_FALSE(buffer_offset_ == manifest_.signatures_offset());
952   TEST_AND_RETURN_FALSE(buffer_.size() >= manifest_.signatures_size());
953   signatures_message_data_.assign(
954       buffer_.begin(), buffer_.begin() + manifest_.signatures_size());
955 
956   LOG(INFO) << "Extracted signature data of size "
957             << manifest_.signatures_size() << " at "
958             << manifest_.signatures_offset();
959   return true;
960 }
961 
962 bool DeltaPerformer::GetPublicKey(string* out_public_key) {
963   out_public_key->clear();
964 
965   if (utils::FileExists(public_key_path_.c_str())) {
966     LOG(INFO) << "Verifying using public key: " << public_key_path_;
967     return utils::ReadFile(public_key_path_, out_public_key);
968   }
969 
970   // If this is an official build then we are not allowed to use public key
971   // from Omaha response.
972   if (!hardware_->IsOfficialBuild() && !install_plan_->public_key_rsa.empty()) {
973     LOG(INFO) << "Verifying using public key from Omaha response.";
974     return brillo::data_encoding::Base64Decode(install_plan_->public_key_rsa,
975                                                out_public_key);
976   }
977   LOG(INFO) << "No public keys found for verification.";
978   return true;
979 }
980 
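// Returns a {verifier, perform_verification} pair. Verification is skipped
// (second element false) only when no update certificate zip is present and
// the configured public key is empty.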
981 std::pair<std::unique_ptr<PayloadVerifier>, bool>
982 DeltaPerformer::CreatePayloadVerifier() {
983   if (utils::FileExists(update_certificates_path_.c_str())) {
984     LOG(INFO) << "Verifying using certificates: " << update_certificates_path_;
985     return {
986         PayloadVerifier::CreateInstanceFromZipPath(update_certificates_path_),
987         true};
988   }
989 
990   string public_key;
991   if (!GetPublicKey(&public_key)) {
992     LOG(ERROR) << "Failed to read public key";
993     return {nullptr, true};
994   }
995 
996   // Skips the verification if the public key is empty.
997   if (public_key.empty()) {
998     return {nullptr, false};
999   }
1000   LOG(INFO) << "Verifying using public key: " << public_key;
1001   return {PayloadVerifier::CreateInstance(public_key), true};
1002 }
1003 
1004 ErrorCode DeltaPerformer::ValidateManifest() {
1005   // Perform assorted checks to validate the manifest: make sure it matches
1006   // data from other sources, and that it is a supported version.
1007   bool has_old_fields = std::any_of(manifest_.partitions().begin(),
1008                                     manifest_.partitions().end(),
1009                                     [](const PartitionUpdate& partition) {
1010                                       return partition.has_old_partition_info();
1011                                     });
1012 
1013   // The presence of an old partition hash is the sole indicator for a delta
1014   // update. Also, always treat the partial update as delta so that we can
1015   // perform the minor version check correctly.
1016   InstallPayloadType actual_payload_type =
1017       (has_old_fields || manifest_.partial_update())
1018           ? InstallPayloadType::kDelta
1019           : InstallPayloadType::kFull;
1020 
1021   if (payload_->type == InstallPayloadType::kUnknown) {
1022     LOG(INFO) << "Detected a '"
1023               << InstallPayloadTypeToString(actual_payload_type)
1024               << "' payload.";
1025     payload_->type = actual_payload_type;
1026   } else if (payload_->type != actual_payload_type) {
1027     LOG(ERROR) << "InstallPlan expected a '"
1028                << InstallPayloadTypeToString(payload_->type)
1029                << "' payload but the downloaded manifest contains a '"
1030                << InstallPayloadTypeToString(actual_payload_type)
1031                << "' payload.";
1032     return ErrorCode::kPayloadMismatchedType;
1033   }
1034   // Check that the minor version is compatible.
1035   // TODO(xunchang) increment minor version & add check for partial update
1036   if (actual_payload_type == InstallPayloadType::kFull) {
1037     if (manifest_.minor_version() != kFullPayloadMinorVersion) {
1038       LOG(ERROR) << "Manifest contains minor version "
1039                  << manifest_.minor_version()
1040                  << ", but all full payloads should have version "
1041                  << kFullPayloadMinorVersion << ".";
1042       return ErrorCode::kUnsupportedMinorPayloadVersion;
1043     }
1044   } else {
1045     if (manifest_.minor_version() < kMinSupportedMinorPayloadVersion ||
1046         manifest_.minor_version() > kMaxSupportedMinorPayloadVersion) {
1047       LOG(ERROR) << "Manifest contains minor version "
1048                  << manifest_.minor_version()
1049                  << " not in the range of supported minor versions ["
1050                  << kMinSupportedMinorPayloadVersion << ", "
1051                  << kMaxSupportedMinorPayloadVersion << "].";
1052       return ErrorCode::kUnsupportedMinorPayloadVersion;
1053     }
1054   }
1055 
1056   ErrorCode error_code = CheckTimestampError();
1057   if (error_code != ErrorCode::kSuccess) {
1058     if (error_code == ErrorCode::kPayloadTimestampError) {
1059       if (!hardware_->AllowDowngrade()) {
1060         return ErrorCode::kPayloadTimestampError;
1061       }
1062       LOG(INFO) << "The current OS build allows downgrade, continuing to apply"
1063                    " the payload with an older timestamp.";
1064     } else {
1065       LOG(ERROR) << "Timestamp check returned "
1066                  << utils::ErrorCodeToString(error_code);
1067       return error_code;
1068     }
1069   }
1070 
1071   // TODO(crbug.com/37661) we should be adding more and more manifest checks,
1072   // such as partition boundaries, etc.
1073 
1074   return ErrorCode::kSuccess;
1075 }
1076 
1077 ErrorCode DeltaPerformer::CheckTimestampError() const {
1078   bool is_partial_update =
1079       manifest_.has_partial_update() && manifest_.partial_update();
1080   const auto& partitions = manifest_.partitions();
1081 
1082   // Check the version field for a given PartitionUpdate object. If an error
1083   // is encountered, return it. If a downgrade is detected,
1084   // |*downgrade_detected| is set. The returned error code tells the caller
1085   // whether it should continue to check the next partition or exit early
1086   // due to errors.
1087   auto&& timestamp_valid = [this](const PartitionUpdate& partition,
1088                                   bool allow_empty_version,
1089                                   bool* downgrade_detected) -> ErrorCode {
1090     const auto& partition_name = partition.partition_name();
1091     if (!partition.has_version()) {
1092       if (hardware_->GetVersionForLogging(partition_name).empty()) {
1093         LOG(INFO) << partition_name << " doesn't have a version, skipping "
1094                   << "downgrade check.";
1095         return ErrorCode::kSuccess;
1096       }
1097 
1098       if (allow_empty_version) {
1099         return ErrorCode::kSuccess;
1100       }
1101       LOG(ERROR)
1102           << "PartitionUpdate " << partition_name
1103           << " doesn't have a version field. Not allowed in partial updates.";
1104       return ErrorCode::kDownloadManifestParseError;
1105     }
1106 
1107     auto error_code =
1108         hardware_->IsPartitionUpdateValid(partition_name, partition.version());
1109     switch (error_code) {
1110       case ErrorCode::kSuccess:
1111         break;
1112       case ErrorCode::kPayloadTimestampError:
1113         *downgrade_detected = true;
1114         LOG(WARNING) << "PartitionUpdate " << partition_name
1115                      << " has an older version than partition on device.";
1116         break;
1117       default:
1118         LOG(ERROR) << "IsPartitionUpdateValid(" << partition_name
1119                    << ") returned " << utils::ErrorCodeToString(error_code);
1120         break;
1121     }
1122     return error_code;
1123   };
1124 
1125   bool downgrade_detected = false;
1126 
1127   if (is_partial_update) {
1128     // For partial updates, all partitions MUST have valid timestamps, but
1129     // max_timestamp can be empty.
1130     for (const auto& partition : partitions) {
1131       auto error_code = timestamp_valid(
1132           partition, false /* allow_empty_version */, &downgrade_detected);
1133       if (error_code != ErrorCode::kSuccess &&
1134           error_code != ErrorCode::kPayloadTimestampError) {
1135         return error_code;
1136       }
1137     }
1138     if (downgrade_detected) {
1139       return ErrorCode::kPayloadTimestampError;
1140     }
1141     return ErrorCode::kSuccess;
1142   }
1143 
1144   // For non-partial updates, check max_timestamp first.
1145   if (manifest_.max_timestamp() < hardware_->GetBuildTimestamp()) {
1146     LOG(ERROR) << "The current OS build timestamp ("
1147                << hardware_->GetBuildTimestamp()
1148                << ") is newer than the maximum timestamp in the manifest ("
1149                << manifest_.max_timestamp() << ")";
1150     return ErrorCode::kPayloadTimestampError;
1151   }
1152   // Otherwise... partitions can have empty timestamps.
1153   for (const auto& partition : partitions) {
1154     auto error_code = timestamp_valid(
1155         partition, true /* allow_empty_version */, &downgrade_detected);
1156     if (error_code != ErrorCode::kSuccess &&
1157         error_code != ErrorCode::kPayloadTimestampError) {
1158       return error_code;
1159     }
1160   }
1161   if (downgrade_detected) {
1162     return ErrorCode::kPayloadTimestampError;
1163   }
1164   return ErrorCode::kSuccess;
1165 }
1166 
1167 ErrorCode DeltaPerformer::ValidateOperationHash(
1168     const InstallOperation& operation) {
1169   if (!operation.data_sha256_hash().size()) {
1170     if (!operation.data_length()) {
1171       // Operations that do not have any data blob won't have any operation
1172       // hash either. So, these operations are always considered validated
1173       // since the metadata that contains all the non-data-blob portions of
1174       // the operation has already been validated. This is true for both HTTP
1175       // and HTTPS cases.
1176       return ErrorCode::kSuccess;
1177     }
1178 
1179     // No hash is present for an operation that has data blobs. This shouldn't
1180     // happen normally for any client that has this code, because the
1181     // corresponding update should have been produced with the operation
1182     // hashes. So if it happens it means either we've turned operation hash
1183     // generation off in DeltaDiffGenerator or it's a regression of some sort.
1184     // One caveat though: the last operation is an unused signature operation
1185     // that doesn't have a hash at the time the manifest is created. So we
1186     // should not complain about that operation. This operation can be
1187     // recognized by the fact that its offset is mentioned in the manifest.
1188     if (manifest_.signatures_offset() &&
1189         manifest_.signatures_offset() == operation.data_offset()) {
1190       LOG(INFO) << "Skipping hash verification for signature operation "
1191                 << next_operation_num_ + 1;
1192     } else {
1193       if (install_plan_->hash_checks_mandatory) {
1194         LOG(ERROR) << "Missing mandatory operation hash for operation "
1195                    << next_operation_num_ + 1;
1196         return ErrorCode::kDownloadOperationHashMissingError;
1197       }
1198 
1199       LOG(WARNING) << "Cannot validate operation " << next_operation_num_ + 1
1200                    << " as there's no operation hash in manifest";
1201     }
1202     return ErrorCode::kSuccess;
1203   }
1204 
1205   brillo::Blob expected_op_hash;
1206   expected_op_hash.assign(operation.data_sha256_hash().data(),
1207                           (operation.data_sha256_hash().data() +
1208                            operation.data_sha256_hash().size()));
1209 
1210   brillo::Blob calculated_op_hash;
1211   if (!HashCalculator::RawHashOfBytes(
1212           buffer_.data(), operation.data_length(), &calculated_op_hash)) {
1213     LOG(ERROR) << "Unable to compute actual hash of operation "
1214                << next_operation_num_;
1215     return ErrorCode::kDownloadOperationHashVerificationError;
1216   }
1217 
1218   if (calculated_op_hash != expected_op_hash) {
1219     LOG(ERROR) << "Hash verification failed for operation "
1220                << next_operation_num_
1221                << ". Expected hash = " << HexEncode(expected_op_hash);
1222     LOG(ERROR) << "Calculated hash over " << operation.data_length()
1223                << " bytes at offset: " << operation.data_offset() << " = "
1224                << HexEncode(calculated_op_hash);
1225     return ErrorCode::kDownloadOperationHashMismatch;
1226   }
1227 
1228   return ErrorCode::kSuccess;
1229 }
1230 
1231 #define TEST_AND_RETURN_VAL(_retval, _condition)              \
1232   do {                                                        \
1233     if (!(_condition)) {                                      \
1234       LOG(ERROR) << "VerifyPayload failure: " << #_condition; \
1235       return _retval;                                         \
1236     }                                                         \
1237   } while (0);
1238 
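// Verifies the fully downloaded payload: the total size must match the size
// reported by the update check response, the whole-payload hash must match
// |update_check_response_hash|, and, when a public key is available, the
// payload signature must verify against the signed payload hash.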
1239 ErrorCode DeltaPerformer::VerifyPayload(
1240     const brillo::Blob& update_check_response_hash,
1241     const uint64_t update_check_response_size) {
1242   // Verifies the download size.
1243   if (update_check_response_size !=
1244       metadata_size_ + metadata_signature_size_ + buffer_offset_) {
1245     LOG(ERROR) << "update_check_response_size (" << update_check_response_size
1246                << ") doesn't match metadata_size (" << metadata_size_
1247                << ") + metadata_signature_size (" << metadata_signature_size_
1248                << ") + buffer_offset (" << buffer_offset_ << ").";
1249     return ErrorCode::kPayloadSizeMismatchError;
1250   }
1251 
1252   // Verifies the payload hash.
1253   TEST_AND_RETURN_VAL(ErrorCode::kDownloadPayloadVerificationError,
1254                       !payload_hash_calculator_.raw_hash().empty());
1255   if (payload_hash_calculator_.raw_hash() != update_check_response_hash) {
1256     LOG(ERROR) << "Actual hash: "
1257                << HexEncode(payload_hash_calculator_.raw_hash())
1258                << ", expected hash: " << HexEncode(update_check_response_hash);
1259     return ErrorCode::kPayloadHashMismatchError;
1260   }
1261 
1262   // NOLINTNEXTLINE(whitespace/braces)
1263   auto [payload_verifier, perform_verification] = CreatePayloadVerifier();
1264   if (!perform_verification) {
1265     LOG(WARNING) << "Not verifying signed delta payload -- missing public key.";
1266     return ErrorCode::kSuccess;
1267   }
1268   if (!payload_verifier) {
1269     LOG(ERROR) << "Failed to create the payload verifier.";
1270     return ErrorCode::kDownloadPayloadPubKeyVerificationError;
1271   }
1272 
1273   TEST_AND_RETURN_VAL(ErrorCode::kSignedDeltaPayloadExpectedError,
1274                       !signatures_message_data_.empty());
1275   brillo::Blob hash_data = signed_hash_calculator_.raw_hash();
1276   TEST_AND_RETURN_VAL(ErrorCode::kDownloadPayloadPubKeyVerificationError,
1277                       hash_data.size() == kSHA256Size);
1278 
1279   if (!payload_verifier->VerifySignature(signatures_message_data_, hash_data)) {
1280     // The autoupdate_CatchBadSignatures test checks for this string
1281     // in log-files. Keep in sync.
1282     LOG(ERROR) << "Public key verification failed, thus update failed.";
1283     return ErrorCode::kDownloadPayloadPubKeyVerificationError;
1284   }
1285 
1286   LOG(INFO) << "Payload hash matches value in payload.";
1287   return ErrorCode::kSuccess;
1288 }
1289 
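// Feeds the current contents of |buffer_| into the payload hash calculator
// (and the first |signed_hash_buffer_size| bytes into the signed-payload hash
// calculator), optionally advances |buffer_offset_|, and releases the
// buffer's memory.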
1290 void DeltaPerformer::DiscardBuffer(bool do_advance_offset,
1291                                    size_t signed_hash_buffer_size) {
1292   // Update the buffer offset.
1293   if (do_advance_offset)
1294     buffer_offset_ += buffer_.size();
1295 
1296   // Hash the content.
1297   payload_hash_calculator_.Update(buffer_.data(), buffer_.size());
1298   signed_hash_calculator_.Update(buffer_.data(), signed_hash_buffer_size);
1299 
1300   // Swap content with an empty vector to ensure that all memory is released.
1301   brillo::Blob().swap(buffer_);
1302 }
1303 
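// An update can be resumed only if the persisted resume state is internally
// consistent: a valid next-operation index, an update check response hash that
// matches the interrupted one, a resume-failure count within the allowed
// limit, and plausible values for the data offset, hash context, and manifest
// metadata/signature sizes.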
1304 bool DeltaPerformer::CanResumeUpdate(PrefsInterface* prefs,
1305                                      const string& update_check_response_hash) {
1306   int64_t next_operation = kUpdateStateOperationInvalid;
1307   if (!(prefs->GetInt64(kPrefsUpdateStateNextOperation, &next_operation) &&
1308         next_operation != kUpdateStateOperationInvalid && next_operation > 0)) {
1309     LOG(WARNING) << "Failed to resume update " << kPrefsUpdateStateNextOperation
1310                  << " invalid: " << next_operation;
1311     return false;
1312   }
1313 
1314   string interrupted_hash;
1315   if (!(prefs->GetString(kPrefsUpdateCheckResponseHash, &interrupted_hash) &&
1316         !interrupted_hash.empty() &&
1317         interrupted_hash == update_check_response_hash)) {
1318     LOG(WARNING) << "Failed to resume update " << kPrefsUpdateCheckResponseHash
1319                  << " mismatch, last hash: " << interrupted_hash
1320                  << ", current hash: " << update_check_response_hash << "";
1321     return false;
1322   }
1323 
1324   int64_t resumed_update_failures{};
1325   // Note that storing this value is optional, but if it is present it should
1326   // not exceed the limit.
1327   if (prefs->GetInt64(kPrefsResumedUpdateFailures, &resumed_update_failures) &&
1328       resumed_update_failures > kMaxResumedUpdateFailures) {
1329     LOG(WARNING) << "Failed to resume update " << kPrefsResumedUpdateFailures
1330                  << " invalid: " << resumed_update_failures;
1331     return false;
1332   }
1333 
1334   // Validate the rest of the resume state.
1335   int64_t next_data_offset = -1;
1336   if (!(prefs->GetInt64(kPrefsUpdateStateNextDataOffset, &next_data_offset) &&
1337         next_data_offset >= 0)) {
1338     LOG(WARNING) << "Failed to resume update "
1339                  << kPrefsUpdateStateNextDataOffset
1340                  << " invalid: " << next_data_offset;
1341     return false;
1342   }
1343 
1344   string sha256_context;
1345   if (!(prefs->GetString(kPrefsUpdateStateSHA256Context, &sha256_context) &&
1346         !sha256_context.empty())) {
1347     LOG(WARNING) << "Failed to resume update " << kPrefsUpdateStateSHA256Context
1348                  << " is empty.";
1349     return false;
1350   }
1351 
1352   int64_t manifest_metadata_size = 0;
1353   if (!(prefs->GetInt64(kPrefsManifestMetadataSize, &manifest_metadata_size) &&
1354         manifest_metadata_size > 0)) {
1355     LOG(WARNING) << "Failed to resume update " << kPrefsManifestMetadataSize
1356                  << " invalid: " << manifest_metadata_size;
1357     return false;
1358   }
1359 
1360   int64_t manifest_signature_size = 0;
1361   if (!(prefs->GetInt64(kPrefsManifestSignatureSize,
1362                         &manifest_signature_size) &&
1363         manifest_signature_size >= 0)) {
1364     LOG(WARNING) << "Failed to resume update " << kPrefsManifestSignatureSize
1365                  << " invalid: " << manifest_signature_size;
1366     return false;
1367   }
1368 
1369   return true;
1370 }
1371 
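// A "quick" reset only invalidates the next-operation index, which is enough
// to mark the persisted state as not resumable; a full reset also clears the
// remaining resume-related prefs.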
1372 bool DeltaPerformer::ResetUpdateProgress(PrefsInterface* prefs, bool quick) {
1373   TEST_AND_RETURN_FALSE(prefs->SetInt64(kPrefsUpdateStateNextOperation,
1374                                         kUpdateStateOperationInvalid));
1375   if (!quick) {
1376     prefs->SetInt64(kPrefsUpdateStateNextDataOffset, -1);
1377     prefs->SetInt64(kPrefsUpdateStateNextDataLength, 0);
1378     prefs->SetString(kPrefsUpdateStateSHA256Context, "");
1379     prefs->SetString(kPrefsUpdateStateSignedSHA256Context, "");
1380     prefs->SetString(kPrefsUpdateStateSignatureBlob, "");
1381     prefs->SetInt64(kPrefsManifestMetadataSize, -1);
1382     prefs->SetInt64(kPrefsManifestSignatureSize, -1);
1383     prefs->SetInt64(kPrefsResumedUpdateFailures, 0);
1384     prefs->Delete(kPrefsPostInstallSucceeded);
1385     prefs->Delete(kPrefsVerityWritten);
1386 
1387     LOG(INFO) << "Resetting recorded hash for prepared partitions.";
1388     prefs->Delete(kPrefsDynamicPartitionMetadataUpdated);
1389   }
1390   return true;
1391 }
1392 
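// Rate-limits checkpointing: returns true at most once per
// |update_checkpoint_wait_| interval and schedules the next checkpoint time.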
1393 bool DeltaPerformer::ShouldCheckpoint() {
1394   base::TimeTicks curr_time = base::TimeTicks::Now();
1395   if (curr_time > update_checkpoint_time_) {
1396     update_checkpoint_time_ = curr_time + update_checkpoint_wait_;
1397     return true;
1398   }
1399   return false;
1400 }
1401 
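// Persists the resume state (hash contexts, signature blob, buffer offset,
// and the next operation index/data length) so an interrupted update can be
// resumed from the last checkpoint instead of restarting from scratch; also
// gives the partition writer a chance to checkpoint its own progress.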
1402 bool DeltaPerformer::CheckpointUpdateProgress(bool force) {
1403   if (!force && !ShouldCheckpoint()) {
1404     return false;
1405   }
1406   Terminator::set_exit_blocked(true);
1407   if (last_updated_operation_num_ != next_operation_num_ || force) {
1408     // Resets the progress in case we die in the middle of the state update.
1409     ResetUpdateProgress(prefs_, true);
1410     if (!signatures_message_data_.empty()) {
1411       // Save the signature blob because if the update is interrupted after the
1412       // download phase we don't go through this path anymore. Some alternatives
1413       // to consider:
1414       //
1415       // 1. On resume, re-download the signature blob from the server and
1416       // re-verify it.
1417       //
1418       // 2. Verify the signature as soon as it's received and don't checkpoint
1419       // the blob and the signed sha-256 context.
1420       LOG_IF(WARNING,
1421              !prefs_->SetString(kPrefsUpdateStateSignatureBlob,
1422                                 signatures_message_data_))
1423           << "Unable to store the signature blob.";
1424     }
1425     TEST_AND_RETURN_FALSE(prefs_->SetString(
1426         kPrefsUpdateStateSHA256Context, payload_hash_calculator_.GetContext()));
1427     TEST_AND_RETURN_FALSE(
1428         prefs_->SetString(kPrefsUpdateStateSignedSHA256Context,
1429                           signed_hash_calculator_.GetContext()));
1430     TEST_AND_RETURN_FALSE(
1431         prefs_->SetInt64(kPrefsUpdateStateNextDataOffset, buffer_offset_));
1432     last_updated_operation_num_ = next_operation_num_;
1433 
1434     if (next_operation_num_ < num_total_operations_) {
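      // |acc_num_operations_| holds the cumulative operation count per
      // partition, so the first entry not yet exceeded by
      // |next_operation_num_| identifies the partition the next operation
      // belongs to; the per-partition index is the global index minus the
      // previous partition's cumulative count. For example (hypothetical
      // counts), with acc_num_operations_ = {10, 25} and
      // next_operation_num_ = 12, the next operation is operation 2 of
      // partition 1.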
1435       size_t partition_index = current_partition_;
1436       while (next_operation_num_ >= acc_num_operations_[partition_index]) {
1437         partition_index++;
1438       }
1439       const size_t partition_operation_num =
1440           next_operation_num_ -
1441           (partition_index ? acc_num_operations_[partition_index - 1] : 0);
1442       const InstallOperation& op =
1443           partitions_[partition_index].operations(partition_operation_num);
1444       TEST_AND_RETURN_FALSE(
1445           prefs_->SetInt64(kPrefsUpdateStateNextDataLength, op.data_length()));
1446     } else {
1447       TEST_AND_RETURN_FALSE(
1448           prefs_->SetInt64(kPrefsUpdateStateNextDataLength, 0));
1449     }
1450     if (partition_writer_) {
1451       partition_writer_->CheckpointUpdateProgress(GetPartitionOperationNum());
1452     } else {
1453       CHECK_EQ(next_operation_num_, num_total_operations_)
1454           << "Partition writer is null, we are expected to finish all "
1455              "operations: "
1456           << next_operation_num_ << "/" << num_total_operations_;
1457     }
1458   }
1459   TEST_AND_RETURN_FALSE(
1460       prefs_->SetInt64(kPrefsUpdateStateNextOperation, next_operation_num_));
1461   return true;
1462 }
1463 
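// Restores any persisted resume state: the next operation index, the buffer
// offset, the payload and signed-payload hash contexts, the stored signature
// blob, and the manifest metadata/signature sizes. Also bumps the resumed
// update failure count so that repeated failed resumes can eventually be
// rejected by CanResumeUpdate().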
1464 bool DeltaPerformer::PrimeUpdateState() {
1465   CHECK(manifest_valid_);
1466 
1467   int64_t next_operation = kUpdateStateOperationInvalid;
1468   if (!prefs_->GetInt64(kPrefsUpdateStateNextOperation, &next_operation) ||
1469       next_operation == kUpdateStateOperationInvalid || next_operation <= 0) {
1470     // Initiating a new update; no further state needs to be initialized.
1471     return true;
1472   }
1473   next_operation_num_ = next_operation;
1474 
1475   // Resuming an update -- load the rest of the update state.
1476   int64_t next_data_offset = -1;
1477   TEST_AND_RETURN_FALSE(
1478       prefs_->GetInt64(kPrefsUpdateStateNextDataOffset, &next_data_offset) &&
1479       next_data_offset >= 0);
1480   buffer_offset_ = next_data_offset;
1481 
1482   // The signed hash context and the signature blob may be empty if the
1483   // interrupted update didn't reach the signature.
1484   string signed_hash_context;
1485   if (prefs_->GetString(kPrefsUpdateStateSignedSHA256Context,
1486                         &signed_hash_context)) {
1487     TEST_AND_RETURN_FALSE(
1488         signed_hash_calculator_.SetContext(signed_hash_context));
1489   }
1490 
1491   prefs_->GetString(kPrefsUpdateStateSignatureBlob, &signatures_message_data_);
1492 
1493   string hash_context;
1494   TEST_AND_RETURN_FALSE(
1495       prefs_->GetString(kPrefsUpdateStateSHA256Context, &hash_context) &&
1496       payload_hash_calculator_.SetContext(hash_context));
1497 
1498   int64_t manifest_metadata_size = 0;
1499   TEST_AND_RETURN_FALSE(
1500       prefs_->GetInt64(kPrefsManifestMetadataSize, &manifest_metadata_size) &&
1501       manifest_metadata_size > 0);
1502   metadata_size_ = manifest_metadata_size;
1503 
1504   int64_t manifest_signature_size = 0;
1505   TEST_AND_RETURN_FALSE(
1506       prefs_->GetInt64(kPrefsManifestSignatureSize, &manifest_signature_size) &&
1507       manifest_signature_size >= 0);
1508   metadata_signature_size_ = manifest_signature_size;
1509 
1510   // Advance the download progress to reflect what doesn't need to be
1511   // re-downloaded.
1512   total_bytes_received_ += buffer_offset_;
1513 
1514   // Speculatively count the resume as a failure.
1515   int64_t resumed_update_failures{};
1516   if (prefs_->GetInt64(kPrefsResumedUpdateFailures, &resumed_update_failures)) {
1517     resumed_update_failures++;
1518   } else {
1519     resumed_update_failures = 1;
1520   }
1521   prefs_->SetInt64(kPrefsResumedUpdateFailures, resumed_update_failures);
1522   return true;
1523 }
1524 
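// Delegates to the boot control's DynamicPartitionControl to decide whether
// |part_name| is a dynamic partition in the given |slot|.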
1525 bool DeltaPerformer::IsDynamicPartition(const std::string& part_name,
1526                                         uint32_t slot) {
1527   return boot_control_->GetDynamicPartitionControl()->IsDynamicPartition(
1528       part_name, slot);
1529 }
1530 
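// Note: the |block_size|, |is_interactive|, and |is_dynamic_partition|
// parameters are currently ignored; the writer is created from member state
// (|block_size_|, |interactive_|) and a fresh IsDynamicPartition() lookup
// instead.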
1531 std::unique_ptr<PartitionWriterInterface> DeltaPerformer::CreatePartitionWriter(
1532     const PartitionUpdate& partition_update,
1533     const InstallPlan::Partition& install_part,
1534     DynamicPartitionControlInterface* dynamic_control,
1535     size_t block_size,
1536     bool is_interactive,
1537     bool is_dynamic_partition) {
1538   return partition_writer::CreatePartitionWriter(
1539       partition_update,
1540       install_part,
1541       dynamic_control,
1542       block_size_,
1543       interactive_,
1544       IsDynamicPartition(install_part.name, install_plan_->target_slot));
1545 }
1546 
1547 }  // namespace chromeos_update_engine
1548